From bc97da0d13a3b02e2abfb9e653cda4a02a5c3dd7 Mon Sep 17 00:00:00 2001
From: xpai <73411968+xpai@users.noreply.github.com>
Date: Fri, 7 Jun 2024 21:44:32 +0800
Subject: [PATCH] Update outdated links
---
docs/CTR/index.md | 2 +-
docs/CTR/leaderboard/avazu_x4_001.csv | 4 +
docs/CTR/leaderboard/criteo_x4_001.csv | 4 +
docs/CTR/leaderboard/index.md | 2 +-
docs/CTR/leaderboard/seqctr.md | 4 +
docs/Matching/index.md | 2 +-
docs/Matching/leaderboard/index.md | 2 +-
docs/_toc.yml | 4 +-
ranking/ctr/AFM/AFM_avazu_x1/README.md | 8 +-
ranking/ctr/AFM/AFM_avazu_x4_001/README.md | 8 +-
ranking/ctr/AFM/AFM_avazu_x4_002/README.md | 8 +-
ranking/ctr/AFM/AFM_criteo_x1/README.md | 8 +-
ranking/ctr/AFM/AFM_criteo_x4_001/README.md | 8 +-
ranking/ctr/AFM/AFM_criteo_x4_002/README.md | 8 +-
ranking/ctr/AFM/AFM_frappe_x1/README.md | 8 +-
ranking/ctr/AFM/AFM_kkbox_x1/README.md | 8 +-
.../ctr/AFM/AFM_movielenslatest_x1/README.md | 8 +-
ranking/ctr/AFN/AFN+_avazu_x1/README.md | 8 +-
ranking/ctr/AFN/AFN+_avazu_x4_001/README.md | 8 +-
ranking/ctr/AFN/AFN+_avazu_x4_002/README.md | 8 +-
ranking/ctr/AFN/AFN+_criteo_x1/README.md | 8 +-
ranking/ctr/AFN/AFN+_criteo_x4_001/README.md | 8 +-
ranking/ctr/AFN/AFN+_criteo_x4_002/README.md | 8 +-
ranking/ctr/AFN/AFN+_frappe_x1/README.md | 8 +-
ranking/ctr/AFN/AFN+_kkbox_x1/README.md | 6 +-
.../ctr/AFN/AFN+_movielenslatest_x1/README.md | 8 +-
ranking/ctr/AFN/AFN_avazu_x1/README.md | 8 +-
ranking/ctr/AFN/AFN_avazu_x4_001/README.md | 8 +-
ranking/ctr/AFN/AFN_avazu_x4_002/README.md | 8 +-
ranking/ctr/AFN/AFN_criteo_x1/README.md | 8 +-
ranking/ctr/AFN/AFN_criteo_x4_001/README.md | 8 +-
ranking/ctr/AFN/AFN_criteo_x4_002/README.md | 8 +-
ranking/ctr/AFN/AFN_frappe_x1/README.md | 8 +-
ranking/ctr/AFN/AFN_kkbox_x1/README.md | 6 +-
.../ctr/AFN/AFN_movielenslatest_x1/README.md | 8 +-
.../AOANet_amazonelectronics_x1/README.md | 8 +-
ranking/ctr/AOANet/AOANet_avazu_x1/README.md | 376 +--
ranking/ctr/AOANet/AOANet_criteo_x1/README.md | 474 +--
ranking/ctr/AOANet/AOANet_frappe_x1/README.md | 432 +--
.../ctr/AOANet/AOANet_kuaivideo_x1/README.md | 8 +-
.../AOANet/AOANet_microvideo1.7m_x1/README.md | 8 +-
.../AOANet_movielenslatest_x1/README.md | 462 +--
.../ctr/AOANet/AOANet_taobaoad_x1/README.md | 8 +-
.../AutoInt+_amazonelectronics_x1/README.md | 8 +-
.../ctr/AutoInt/AutoInt+_avazu_x1/README.md | 8 +-
.../AutoInt/AutoInt+_avazu_x4_001/README.md | 8 +-
.../AutoInt/AutoInt+_avazu_x4_002/README.md | 8 +-
.../ctr/AutoInt/AutoInt+_criteo_x1/README.md | 8 +-
.../AutoInt/AutoInt+_criteo_x4_001/README.md | 8 +-
.../AutoInt/AutoInt+_criteo_x4_002/README.md | 8 +-
.../ctr/AutoInt/AutoInt+_frappe_x1/README.md | 8 +-
.../ctr/AutoInt/AutoInt+_kkbox_x1/README.md | 6 +-
.../AutoInt/AutoInt+_kuaivideo_x1/README.md | 8 +-
.../AutoInt+_microvideo1.7m_x1/README.md | 8 +-
.../AutoInt+_movielenslatest_x1/README.md | 8 +-
.../AutoInt/AutoInt+_taobaoad_x1/README.md | 8 +-
.../ctr/AutoInt/AutoInt_avazu_x1/README.md | 8 +-
.../AutoInt/AutoInt_avazu_x4_001/README.md | 8 +-
.../AutoInt/AutoInt_avazu_x4_002/README.md | 8 +-
.../ctr/AutoInt/AutoInt_criteo_x1/README.md | 8 +-
.../AutoInt/AutoInt_criteo_x4_001/README.md | 8 +-
.../AutoInt/AutoInt_criteo_x4_002/README.md | 8 +-
.../ctr/AutoInt/AutoInt_frappe_x1/README.md | 8 +-
.../ctr/AutoInt/AutoInt_kkbox_x1/README.md | 8 +-
.../AutoInt_movielenslatest_x1/README.md | 8 +-
.../BST/BST_amazonelectronics_x1/README.md | 8 +-
ranking/ctr/BST/BST_kuaivideo_x1/README.md | 8 +-
.../ctr/BST/BST_microvideo1.7m_x1/README.md | 8 +-
ranking/ctr/BST/BST_taobaoad_x1/README.md | 8 +-
ranking/ctr/CCPM/CCPM_avazu_x4_001/README.md | 8 +-
ranking/ctr/CCPM/CCPM_avazu_x4_002/README.md | 8 +-
ranking/ctr/CCPM/CCPM_criteo_x4_001/README.md | 8 +-
ranking/ctr/CCPM/CCPM_criteo_x4_002/README.md | 8 +-
ranking/ctr/DCN/CrossNet_avazu_x1/README.md | 8 +-
.../ctr/DCN/CrossNet_avazu_x4_001/README.md | 8 +-
.../ctr/DCN/CrossNet_avazu_x4_002/README.md | 8 +-
ranking/ctr/DCN/CrossNet_criteo_x1/README.md | 6 +-
.../ctr/DCN/CrossNet_criteo_x4_001/README.md | 8 +-
.../ctr/DCN/CrossNet_criteo_x4_002/README.md | 8 +-
ranking/ctr/DCN/CrossNet_frappe_x1/README.md | 8 +-
ranking/ctr/DCN/CrossNet_kkbox_x1/README.md | 6 +-
.../DCN/CrossNet_movielenslatest_x1/README.md | 8 +-
.../DCN/DCN_amazonelectronics_x1/README.md | 8 +-
ranking/ctr/DCN/DCN_avazu_x1/README.md | 8 +-
ranking/ctr/DCN/DCN_avazu_x4_001/README.md | 8 +-
ranking/ctr/DCN/DCN_avazu_x4_002/README.md | 8 +-
ranking/ctr/DCN/DCN_criteo_x1/README.md | 8 +-
ranking/ctr/DCN/DCN_criteo_x4_001/README.md | 8 +-
ranking/ctr/DCN/DCN_criteo_x4_002/README.md | 8 +-
ranking/ctr/DCN/DCN_frappe_x1/README.md | 8 +-
ranking/ctr/DCN/DCN_kkbox_x1/README.md | 708 ++---
ranking/ctr/DCN/DCN_kuaivideo_x1/README.md | 8 +-
.../ctr/DCN/DCN_microvideo1.7m_x1/README.md | 8 +-
.../ctr/DCN/DCN_movielenslatest_x1/README.md | 8 +-
ranking/ctr/DCN/DCN_taobaoad_x1/README.md | 8 +-
.../ctr/DCNv2/CrossNetv2_avazu_x1/README.md | 8 +-
.../ctr/DCNv2/CrossNetv2_criteo_x1/README.md | 8 +-
.../ctr/DCNv2/CrossNetv2_frappe_x1/README.md | 6 +-
.../CrossNetv2_movielenslatest_x1/README.md | 592 ++--
.../DCNv2_amazonelectronics_x1/README.md | 8 +-
ranking/ctr/DCNv2/DCNv2_avazu_x1/README.md | 8 +-
.../DCNv2_avazu_x4_011_19794dc6.log | 86 +
.../DCNv2_avazu_x4_tuner_config_02.csv | 12 +
.../DCNv2_avazu_x4_tuner_config_02.yaml | 39 +
.../dataset_config.yaml | 19 +
.../model_config.yaml | 444 +++
.../ctr/DCNv2/DCNv2_avazu_x4_001/README.md | 161 +
.../DCNv2/DCNv2_avazu_x4_001/environments.txt | 17 +
.../ctr/DCNv2/DCNv2_avazu_x4_001/results.csv | 1 +
ranking/ctr/DCNv2/DCNv2_criteo_x1/README.md | 8 +-
.../DCNv2_criteo_x4_001_005_c2376d55.log | 159 +
.../DCNv2_criteo_x4_tuner_config_01.csv | 16 +
.../DCNv2_criteo_x4_tuner_config_01.yaml | 45 +
.../dataset_config.yaml | 21 +
.../model_config.yaml | 624 ++++
.../ctr/DCNv2/DCNv2_criteo_x4_001/README.md | 234 ++
.../DCNv2_criteo_x4_001/environments.txt | 18 +
.../ctr/DCNv2/DCNv2_criteo_x4_001/results.csv | 1 +
ranking/ctr/DCNv2/DCNv2_frappe_x1/README.md | 8 +-
.../ctr/DCNv2/DCNv2_kuaivideo_x1/README.md | 8 +-
.../DCNv2/DCNv2_microvideo1.7m_x1/README.md | 8 +-
.../DCNv2/DCNv2_movielenslatest_x1/README.md | 8 +-
ranking/ctr/DCNv2/DCNv2_taobaoad_x1/README.md | 8 +-
.../ctr/DESTINE/DESTINE_avazu_x1/README.md | 6 +-
.../ctr/DESTINE/DESTINE_criteo_x1/README.md | 6 +-
.../ctr/DESTINE/DESTINE_frappe_x1/README.md | 6 +-
.../DESTINE_movielenslatest_x1/README.md | 6 +-
.../DIEN/DIEN_amazonelectronics_x1/README.md | 8 +-
ranking/ctr/DIEN/DIEN_kuaivideo_x1/README.md | 8 +-
.../ctr/DIEN/DIEN_microvideo1.7m_x1/README.md | 8 +-
ranking/ctr/DIEN/DIEN_taobaoad_x1/README.md | 8 +-
.../DIN/DIN_amazonelectronics_x1/README.md | 8 +-
ranking/ctr/DIN/DIN_kuaivideo_x1/README.md | 8 +-
.../ctr/DIN/DIN_microvideo1.7m_x1/README.md | 8 +-
ranking/ctr/DIN/DIN_taobaoad_x1/README.md | 8 +-
ranking/ctr/DLRM/DLRM_avazu_x1/README.md | 368 +--
ranking/ctr/DLRM/DLRM_criteo_x1/README.md | 458 +--
ranking/ctr/DLRM/DLRM_frappe_x1/README.md | 446 +--
.../DLRM/DLRM_movielenslatest_x1/README.md | 476 +--
.../DNN/DNN_amazonelectronics_x1/README.md | 8 +-
ranking/ctr/DNN/DNN_avazu_x1/README.md | 8 +-
ranking/ctr/DNN/DNN_avazu_x4_001/README.md | 8 +-
ranking/ctr/DNN/DNN_avazu_x4_002/README.md | 8 +-
ranking/ctr/DNN/DNN_criteo_x1/README.md | 8 +-
ranking/ctr/DNN/DNN_criteo_x4_001/README.md | 8 +-
ranking/ctr/DNN/DNN_criteo_x4_002/README.md | 8 +-
ranking/ctr/DNN/DNN_frappe_x1/README.md | 8 +-
ranking/ctr/DNN/DNN_kkbox_x1/README.md | 6 +-
ranking/ctr/DNN/DNN_kuaivideo_x1/README.md | 8 +-
.../ctr/DNN/DNN_microvideo1.7m_x1/README.md | 8 +-
.../ctr/DNN/DNN_movielenslatest_x1/README.md | 8 +-
ranking/ctr/DNN/DNN_taobaoad_x1/README.md | 8 +-
ranking/ctr/DSSM/DSSM_frappe_x1/README.md | 516 ++--
.../DSSM/DSSM_movielenslatest_x1/README.md | 414 +--
.../DeepCross_avazu_x4_001/README.md | 8 +-
.../DeepCross_avazu_x4_002/README.md | 8 +-
.../DeepCross_criteo_x4_001/README.md | 8 +-
.../DeepCross_criteo_x4_002/README.md | 8 +-
.../DeepCrossing/DeepCross_kkbox_x1/README.md | 6 +-
.../DeepFM_amazonelectronics_x1/README.md | 8 +-
ranking/ctr/DeepFM/DeepFM_avazu_x1/README.md | 8 +-
.../ctr/DeepFM/DeepFM_avazu_x4_001/README.md | 302 +-
.../ctr/DeepFM/DeepFM_avazu_x4_002/README.md | 8 +-
ranking/ctr/DeepFM/DeepFM_criteo_x1/README.md | 8 +-
.../ctr/DeepFM/DeepFM_criteo_x4_001/README.md | 8 +-
.../ctr/DeepFM/DeepFM_criteo_x4_002/README.md | 8 +-
ranking/ctr/DeepFM/DeepFM_frappe_x1/README.md | 8 +-
ranking/ctr/DeepFM/DeepFM_kkbox_x1/README.md | 8 +-
.../ctr/DeepFM/DeepFM_kuaivideo_x1/README.md | 8 +-
.../DeepFM/DeepFM_microvideo1.7m_x1/README.md | 8 +-
.../DeepFM_movielenslatest_x1/README.md | 8 +-
.../ctr/DeepFM/DeepFM_taobaoad_x1/README.md | 8 +-
ranking/ctr/DeepIM/DeepIM_avazu_x1/README.md | 8 +-
ranking/ctr/DeepIM/DeepIM_criteo_x1/README.md | 8 +-
ranking/ctr/DeepIM/DeepIM_frappe_x1/README.md | 8 +-
.../DeepIM_movielenslatest_x1/README.md | 8 +-
ranking/ctr/EDCN/EDCN_avazu_x1/README.md | 8 +-
ranking/ctr/EDCN/EDCN_criteo_x1/README.md | 8 +-
ranking/ctr/EDCN/EDCN_frappe_x1/README.md | 8 +-
.../EDCN/EDCN_movielenslatest_x1/README.md | 8 +-
ranking/ctr/FFM/FFM_avazu_x1/README.md | 8 +-
ranking/ctr/FFM/FFM_avazu_x4_001/README.md | 8 +-
ranking/ctr/FFM/FFM_avazu_x4_002/README.md | 8 +-
ranking/ctr/FFM/FFM_criteo_x1/README.md | 8 +-
ranking/ctr/FFM/FFM_criteo_x4_001/README.md | 8 +-
ranking/ctr/FFM/FFM_criteo_x4_002/README.md | 8 +-
ranking/ctr/FFM/FFM_frappe_x1/README.md | 8 +-
ranking/ctr/FFM/FFM_kkbox_x1/README.md | 6 +-
.../ctr/FFM/FFM_movielenslatest_x1/README.md | 8 +-
.../ctr/FGCNN/FGCNN_avazu_x4_001/README.md | 8 +-
.../ctr/FGCNN/FGCNN_avazu_x4_002/README.md | 8 +-
.../ctr/FGCNN/FGCNN_criteo_x4_001/README.md | 384 +--
.../ctr/FGCNN/FGCNN_criteo_x4_002/README.md | 8 +-
ranking/ctr/FGCNN/FGCNN_kkbox_x1/README.md | 514 ++--
ranking/ctr/FLEN/FLEN_avazu_x4_001/README.md | 8 +-
.../ctr/FM/FM_amazonelectronics_x1/README.md | 8 +-
ranking/ctr/FM/FM_avazu_x1/README.md | 8 +-
ranking/ctr/FM/FM_avazu_x4_001/README.md | 8 +-
ranking/ctr/FM/FM_avazu_x4_002/README.md | 8 +-
ranking/ctr/FM/FM_criteo_x1/README.md | 8 +-
ranking/ctr/FM/FM_criteo_x4_001/README.md | 8 +-
ranking/ctr/FM/FM_criteo_x4_002/README.md | 8 +-
ranking/ctr/FM/FM_frappe_x1/README.md | 8 +-
ranking/ctr/FM/FM_kkbox_x1/README.md | 6 +-
ranking/ctr/FM/FM_kuaivideo_x1/README.md | 8 +-
ranking/ctr/FM/FM_microvideo1.7m_x1/README.md | 8 +-
.../ctr/FM/FM_movielenslatest_x1/README.md | 8 +-
ranking/ctr/FM/FM_taobaoad_x1/README.md | 8 +-
.../ctr/FiBiNET/FiBiNET_avazu_x1/README.md | 8 +-
.../FiBiNET/FiBiNET_avazu_x4_001/README.md | 8 +-
.../FiBiNET/FiBiNET_avazu_x4_002/README.md | 8 +-
.../ctr/FiBiNET/FiBiNET_criteo_x1/README.md | 8 +-
.../FiBiNET/FiBiNET_criteo_x4_001/README.md | 8 +-
.../FiBiNET/FiBiNET_criteo_x4_002/README.md | 8 +-
.../ctr/FiBiNET/FiBiNET_frappe_x1/README.md | 8 +-
.../ctr/FiBiNET/FiBiNET_kkbox_x1/README.md | 6 +-
.../FiBiNET_movielenslatest_x1/README.md | 8 +-
ranking/ctr/FiGNN/FiGNN_avazu_x1/README.md | 8 +-
.../ctr/FiGNN/FiGNN_avazu_x4_001/README.md | 8 +-
.../ctr/FiGNN/FiGNN_avazu_x4_002/README.md | 8 +-
ranking/ctr/FiGNN/FiGNN_criteo_x1/README.md | 8 +-
.../ctr/FiGNN/FiGNN_criteo_x4_001/README.md | 8 +-
.../ctr/FiGNN/FiGNN_criteo_x4_002/README.md | 8 +-
ranking/ctr/FiGNN/FiGNN_frappe_x1/README.md | 8 +-
ranking/ctr/FiGNN/FiGNN_kkbox_x1/README.md | 6 +-
.../FiGNN/FiGNN_movielenslatest_x1/README.md | 8 +-
.../ctr/FinalMLP/FinalMLP_avazu_x1/README.md | 6 +-
.../FinalMLP_avazu_x4_001_006_a7c95fe1.log | 110 +
.../FinalMLP_avazu_x4_tuner_config_03.csv | 32 +
.../FinalMLP_avazu_x4_tuner_config_03.yaml | 44 +
.../dataset_config.yaml | 19 +
.../model_config.yaml | 2688 ++++++++++++++++
.../FinalMLP/FinalMLP_avazu_x4_001/README.md | 186 ++
.../FinalMLP_avazu_x4_001/environments.txt | 18 +
.../FinalMLP_avazu_x4_001/results.csv | 1 +
.../ctr/FinalMLP/FinalMLP_criteo_x1/README.md | 6 +-
.../FinalMLP_criteo_x4_001_002_53a37ddd.log | 172 ++
.../FinalMLP_criteo_x4_tuner_config_03.csv | 48 +
.../FinalMLP_criteo_x4_tuner_config_03.yaml | 48 +
.../dataset_config.yaml | 21 +
.../model_config.yaml | 2016 ++++++++++++
.../FinalMLP/FinalMLP_criteo_x4_001/README.md | 247 ++
.../FinalMLP_criteo_x4_001/environments.txt | 18 +
.../FinalMLP_criteo_x4_001/results.csv | 1 +
.../ctr/FinalMLP/FinalMLP_frappe_x1/README.md | 6 +-
.../FinalMLP_movielenslatest_x1/README.md | 6 +-
.../FinalNet_1B_avazu_x1/README.md | 6 +-
.../FinalNet_2B_avazu_x1/README.md | 6 +-
.../FinalNet_avazu_x4_001_015_4b405413.log | 106 +
.../FinalNet_avazu_x4_tuner_config_04.csv | 40 +
.../FinalNet_avazu_x4_tuner_config_04.yaml | 42 +
.../dataset_config.yaml | 19 +
.../model_config.yaml | 1520 +++++++++
.../FinalNet/FinalNet_avazu_x4_001/README.md | 182 ++
.../FinalNet_avazu_x4_001/environments.txt | 18 +
.../FinalNet_avazu_x4_001/results.csv | 1 +
.../FinalNet_1B_criteo_x1/README.md | 6 +-
.../FinalNet_2B_criteo_x1/README.md | 6 +-
.../FinalNet_criteo_x4_001_041_449ccb21.log | 188 ++
.../FinalNet_criteo_x4_tuner_config_05.csv | 72 +
.../FinalNet_criteo_x4_tuner_config_05.yaml | 46 +
.../dataset_config.yaml | 21 +
.../model_config.yaml | 2736 +++++++++++++++++
.../FinalNet/FinalNet_criteo_x4_001/README.md | 263 ++
.../FinalNet_criteo_x4_001/environments.txt | 18 +
.../FinalNet_criteo_x4_001/results.csv | 1 +
.../FinalNet_1B_frappe_x1/README.md | 6 +-
.../FinalNet_2B_frappe_x1/README.md | 6 +-
.../FinalNet_1B_movielenslatest_x1/README.md | 6 +-
.../FinalNet_2B_movielenslatest_x1/README.md | 6 +-
.../FmFM/FmFM_amazonelectronics_x1/README.md | 8 +-
ranking/ctr/FmFM/FmFM_avazu_x1/README.md | 8 +-
ranking/ctr/FmFM/FmFM_criteo_x1/README.md | 8 +-
ranking/ctr/FmFM/FmFM_frappe_x1/README.md | 8 +-
ranking/ctr/FmFM/FmFM_kuaivideo_x1/README.md | 8 +-
.../ctr/FmFM/FmFM_microvideo1.7m_x1/README.md | 8 +-
.../FmFM/FmFM_movielenslatest_x1/README.md | 8 +-
ranking/ctr/FmFM/FmFM_taobaoad_x1/README.md | 8 +-
ranking/ctr/FwFM/FwFM_avazu_x1/README.md | 8 +-
ranking/ctr/FwFM/FwFM_avazu_x4_001/README.md | 8 +-
ranking/ctr/FwFM/FwFM_avazu_x4_002/README.md | 8 +-
ranking/ctr/FwFM/FwFM_criteo_x1/README.md | 8 +-
ranking/ctr/FwFM/FwFM_criteo_x4_001/README.md | 8 +-
ranking/ctr/FwFM/FwFM_criteo_x4_002/README.md | 8 +-
ranking/ctr/FwFM/FwFM_frappe_x1/README.md | 8 +-
ranking/ctr/FwFM/FwFM_kkbox_x1/README.md | 6 +-
.../FwFM/FwFM_movielenslatest_x1/README.md | 410 +--
ranking/ctr/HFM/HFM+_avazu_x1/README.md | 8 +-
ranking/ctr/HFM/HFM+_avazu_x4_001/README.md | 8 +-
ranking/ctr/HFM/HFM+_avazu_x4_002/README.md | 8 +-
ranking/ctr/HFM/HFM+_criteo_x1/README.md | 6 +-
ranking/ctr/HFM/HFM+_criteo_x4_001/README.md | 8 +-
ranking/ctr/HFM/HFM+_criteo_x4_002/README.md | 8 +-
ranking/ctr/HFM/HFM+_frappe_x1/README.md | 8 +-
ranking/ctr/HFM/HFM+_kkbox_x1/README.md | 6 +-
.../ctr/HFM/HFM+_movielenslatest_x1/README.md | 8 +-
ranking/ctr/HFM/HFM_avazu_x1/README.md | 8 +-
ranking/ctr/HFM/HFM_avazu_x4_001/README.md | 8 +-
ranking/ctr/HFM/HFM_avazu_x4_002/README.md | 8 +-
ranking/ctr/HFM/HFM_criteo_x1/README.md | 8 +-
ranking/ctr/HFM/HFM_criteo_x4_001/README.md | 8 +-
ranking/ctr/HFM/HFM_criteo_x4_002/README.md | 8 +-
ranking/ctr/HFM/HFM_kkbox_x1/README.md | 6 +-
.../ctr/HFM/HFM_movielenslatest_x1/README.md | 8 +-
ranking/ctr/HOFM/HOFM_avazu_x1/README.md | 8 +-
ranking/ctr/HOFM/HOFM_avazu_x4_001/README.md | 8 +-
ranking/ctr/HOFM/HOFM_avazu_x4_002/README.md | 8 +-
ranking/ctr/HOFM/HOFM_criteo_x1/README.md | 8 +-
ranking/ctr/HOFM/HOFM_criteo_x4_001/README.md | 8 +-
ranking/ctr/HOFM/HOFM_criteo_x4_002/README.md | 548 ++--
ranking/ctr/HOFM/HOFM_frappe_x1/README.md | 8 +-
ranking/ctr/HOFM/HOFM_kkbox_x1/README.md | 6 +-
.../HOFM/HOFM_movielenslatest_x1/README.md | 8 +-
.../InterHAt/InterHAt_avazu_x4_001/README.md | 8 +-
.../InterHAt/InterHAt_avazu_x4_002/README.md | 8 +-
.../InterHAt/InterHAt_criteo_x4_001/README.md | 8 +-
.../InterHAt/InterHAt_criteo_x4_002/README.md | 8 +-
.../ctr/InterHAt/InterHAt_kkbox_x1/README.md | 8 +-
ranking/ctr/LR/LR_avazu_x1/README.md | 8 +-
ranking/ctr/LR/LR_avazu_x4_001/README.md | 8 +-
ranking/ctr/LR/LR_avazu_x4_002/README.md | 8 +-
ranking/ctr/LR/LR_criteo_x1/README.md | 8 +-
ranking/ctr/LR/LR_criteo_x4_001/README.md | 8 +-
ranking/ctr/LR/LR_criteo_x4_002/README.md | 8 +-
ranking/ctr/LR/LR_frappe_x1/README.md | 8 +-
ranking/ctr/LR/LR_kkbox_x1/README.md | 8 +-
.../ctr/LR/LR_movielenslatest_x1/README.md | 8 +-
.../LorentzFM_avazu_x4_001/README.md | 8 +-
.../LorentzFM_avazu_x4_002/README.md | 8 +-
.../LorentzFM_criteo_x4_001/README.md | 8 +-
.../LorentzFM_criteo_x4_002/README.md | 8 +-
.../LorentzFM/LorentzFM_kkbox_x1/README.md | 592 ++--
.../ctr/MaskNet/MaskNet_avazu_x1/README.md | 6 +-
.../MaskNet_avazu_x4_001_019_541571c0.log | 106 +
.../MaskNet_avazu_x4_tuner_config_03.csv | 36 +
.../MaskNet_avazu_x4_tuner_config_03.yaml | 42 +
.../dataset_config.yaml | 19 +
.../model_config.yaml | 1368 +++++++++
.../MaskNet/MaskNet_avazu_x4_001/README.md | 182 ++
.../MaskNet_avazu_x4_001/environments.txt | 18 +
.../MaskNet/MaskNet_avazu_x4_001/results.csv | 1 +
.../ctr/MaskNet/MaskNet_criteo_x1/README.md | 6 +-
.../MaskNet_criteo_x4_001_018_ccc857cd.log | 168 +
.../MaskNet_criteo_x4_tuner_config_06.csv | 36 +
.../MaskNet_criteo_x4_tuner_config_06.yaml | 46 +
.../dataset_config.yaml | 21 +
.../model_config.yaml | 1368 +++++++++
.../MaskNet/MaskNet_criteo_x4_001/README.md | 243 ++
.../MaskNet_criteo_x4_001/environments.txt | 18 +
.../MaskNet/MaskNet_criteo_x4_001/results.csv | 1 +
.../ctr/MaskNet/MaskNet_frappe_x1/README.md | 6 +-
.../MaskNet_movielenslatest_x1/README.md | 6 +-
ranking/ctr/NFM/NFM_avazu_x1/README.md | 8 +-
ranking/ctr/NFM/NFM_avazu_x4_001/README.md | 8 +-
ranking/ctr/NFM/NFM_avazu_x4_002/README.md | 8 +-
ranking/ctr/NFM/NFM_criteo_x1/README.md | 8 +-
ranking/ctr/NFM/NFM_criteo_x4_001/README.md | 8 +-
ranking/ctr/NFM/NFM_criteo_x4_002/README.md | 8 +-
ranking/ctr/NFM/NFM_frappe_x1/README.md | 8 +-
ranking/ctr/NFM/NFM_kkbox_x1/README.md | 6 +-
.../ctr/NFM/NFM_movielenslatest_x1/README.md | 8 +-
ranking/ctr/ONN/ONN_avazu_x4_001/README.md | 8 +-
ranking/ctr/ONN/ONN_avazu_x4_002/README.md | 8 +-
ranking/ctr/ONN/ONN_criteo_x4_001/README.md | 8 +-
ranking/ctr/ONN/ONN_criteo_x4_002/README.md | 8 +-
ranking/ctr/ONN/ONN_kkbox_x1/README.md | 6 +-
ranking/ctr/PNN/IPNN_avazu_x1/README.md | 8 +-
ranking/ctr/PNN/IPNN_avazu_x4_001/README.md | 8 +-
ranking/ctr/PNN/IPNN_avazu_x4_002/README.md | 8 +-
ranking/ctr/PNN/IPNN_criteo_x1/README.md | 8 +-
ranking/ctr/PNN/IPNN_criteo_x4_001/README.md | 8 +-
ranking/ctr/PNN/IPNN_criteo_x4_002/README.md | 8 +-
ranking/ctr/PNN/IPNN_frappe_x1/README.md | 8 +-
ranking/ctr/PNN/IPNN_kkbox_x1/README.md | 6 +-
.../ctr/PNN/IPNN_movielenslatest_x1/README.md | 8 +-
ranking/ctr/SAM/SAM_avazu_x1/README.md | 374 +--
ranking/ctr/SAM/SAM_criteo_x1/README.md | 462 +--
ranking/ctr/SAM/SAM_frappe_x1/README.md | 740 ++---
.../ctr/SAM/SAM_movielenslatest_x1/README.md | 510 +--
.../ctr/WideDeep/WideDeep_avazu_x1/README.md | 8 +-
.../WideDeep/WideDeep_avazu_x4_001/README.md | 8 +-
.../WideDeep/WideDeep_avazu_x4_002/README.md | 8 +-
.../ctr/WideDeep/WideDeep_criteo_x1/README.md | 8 +-
.../WideDeep/WideDeep_criteo_x4_001/README.md | 8 +-
.../WideDeep/WideDeep_criteo_x4_002/README.md | 8 +-
.../ctr/WideDeep/WideDeep_frappe_x1/README.md | 8 +-
.../ctr/WideDeep/WideDeep_kkbox_x1/README.md | 6 +-
.../WideDeep_movielenslatest_x1/README.md | 8 +-
ranking/ctr/xDeepFM/CIN_avazu_x1/README.md | 8 +-
.../ctr/xDeepFM/CIN_avazu_x4_001/README.md | 6 +-
.../ctr/xDeepFM/CIN_avazu_x4_002/README.md | 6 +-
ranking/ctr/xDeepFM/CIN_criteo_x1/README.md | 8 +-
.../ctr/xDeepFM/CIN_criteo_x4_001/README.md | 8 +-
.../ctr/xDeepFM/CIN_criteo_x4_002/README.md | 8 +-
ranking/ctr/xDeepFM/CIN_frappe_x1/README.md | 8 +-
ranking/ctr/xDeepFM/CIN_kkbox_x1/README.md | 6 +-
.../xDeepFM/CIN_movielenslatest_x1/README.md | 8 +-
.../xDeepFM_amazonelectronics_x1/README.md | 8 +-
.../ctr/xDeepFM/xDeepFM_avazu_x1/README.md | 8 +-
.../xDeepFM/xDeepFM_avazu_x4_001/README.md | 8 +-
.../xDeepFM/xDeepFM_avazu_x4_002/README.md | 8 +-
.../ctr/xDeepFM/xDeepFM_criteo_x1/README.md | 8 +-
.../xDeepFM/xDeepFM_criteo_x4_001/README.md | 8 +-
.../xDeepFM/xDeepFM_criteo_x4_002/README.md | 8 +-
.../ctr/xDeepFM/xDeepFM_frappe_x1/README.md | 8 +-
.../ctr/xDeepFM/xDeepFM_kkbox_x1/README.md | 6 +-
.../xDeepFM/xDeepFM_kuaivideo_x1/README.md | 8 +-
.../xDeepFM_microvideo1.7m_x1/README.md | 8 +-
.../xDeepFM_movielenslatest_x1/README.md | 8 +-
.../ctr/xDeepFM/xDeepFM_taobaoad_x1/README.md | 8 +-
scripts/gen_readme_md.py | 16 +-
scripts/revise_readme_md.py | 21 +
412 files changed, 23062 insertions(+), 6507 deletions(-)
create mode 100644 docs/CTR/leaderboard/seqctr.md
create mode 100644 ranking/ctr/DCNv2/DCNv2_avazu_x4_001/DCNv2_avazu_x4_011_19794dc6.log
create mode 100644 ranking/ctr/DCNv2/DCNv2_avazu_x4_001/DCNv2_avazu_x4_tuner_config_02.csv
create mode 100644 ranking/ctr/DCNv2/DCNv2_avazu_x4_001/DCNv2_avazu_x4_tuner_config_02.yaml
create mode 100644 ranking/ctr/DCNv2/DCNv2_avazu_x4_001/DCNv2_avazu_x4_tuner_config_02/dataset_config.yaml
create mode 100644 ranking/ctr/DCNv2/DCNv2_avazu_x4_001/DCNv2_avazu_x4_tuner_config_02/model_config.yaml
create mode 100644 ranking/ctr/DCNv2/DCNv2_avazu_x4_001/README.md
create mode 100644 ranking/ctr/DCNv2/DCNv2_avazu_x4_001/environments.txt
create mode 100644 ranking/ctr/DCNv2/DCNv2_avazu_x4_001/results.csv
create mode 100644 ranking/ctr/DCNv2/DCNv2_criteo_x4_001/DCNv2_criteo_x4_001_005_c2376d55.log
create mode 100644 ranking/ctr/DCNv2/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01.csv
create mode 100644 ranking/ctr/DCNv2/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01.yaml
create mode 100644 ranking/ctr/DCNv2/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01/dataset_config.yaml
create mode 100644 ranking/ctr/DCNv2/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01/model_config.yaml
create mode 100644 ranking/ctr/DCNv2/DCNv2_criteo_x4_001/README.md
create mode 100644 ranking/ctr/DCNv2/DCNv2_criteo_x4_001/environments.txt
create mode 100644 ranking/ctr/DCNv2/DCNv2_criteo_x4_001/results.csv
create mode 100644 ranking/ctr/FinalMLP/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_001_006_a7c95fe1.log
create mode 100644 ranking/ctr/FinalMLP/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03.csv
create mode 100644 ranking/ctr/FinalMLP/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03.yaml
create mode 100644 ranking/ctr/FinalMLP/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03/dataset_config.yaml
create mode 100644 ranking/ctr/FinalMLP/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03/model_config.yaml
create mode 100644 ranking/ctr/FinalMLP/FinalMLP_avazu_x4_001/README.md
create mode 100644 ranking/ctr/FinalMLP/FinalMLP_avazu_x4_001/environments.txt
create mode 100644 ranking/ctr/FinalMLP/FinalMLP_avazu_x4_001/results.csv
create mode 100644 ranking/ctr/FinalMLP/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_001_002_53a37ddd.log
create mode 100644 ranking/ctr/FinalMLP/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03.csv
create mode 100644 ranking/ctr/FinalMLP/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03.yaml
create mode 100644 ranking/ctr/FinalMLP/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03/dataset_config.yaml
create mode 100644 ranking/ctr/FinalMLP/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03/model_config.yaml
create mode 100644 ranking/ctr/FinalMLP/FinalMLP_criteo_x4_001/README.md
create mode 100644 ranking/ctr/FinalMLP/FinalMLP_criteo_x4_001/environments.txt
create mode 100644 ranking/ctr/FinalMLP/FinalMLP_criteo_x4_001/results.csv
create mode 100644 ranking/ctr/FinalNet/FinalNet_avazu_x4_001/FinalNet_avazu_x4_001_015_4b405413.log
create mode 100644 ranking/ctr/FinalNet/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04.csv
create mode 100644 ranking/ctr/FinalNet/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04.yaml
create mode 100644 ranking/ctr/FinalNet/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04/dataset_config.yaml
create mode 100644 ranking/ctr/FinalNet/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04/model_config.yaml
create mode 100644 ranking/ctr/FinalNet/FinalNet_avazu_x4_001/README.md
create mode 100644 ranking/ctr/FinalNet/FinalNet_avazu_x4_001/environments.txt
create mode 100644 ranking/ctr/FinalNet/FinalNet_avazu_x4_001/results.csv
create mode 100644 ranking/ctr/FinalNet/FinalNet_criteo_x4_001/FinalNet_criteo_x4_001_041_449ccb21.log
create mode 100644 ranking/ctr/FinalNet/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05.csv
create mode 100644 ranking/ctr/FinalNet/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05.yaml
create mode 100644 ranking/ctr/FinalNet/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05/dataset_config.yaml
create mode 100644 ranking/ctr/FinalNet/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05/model_config.yaml
create mode 100644 ranking/ctr/FinalNet/FinalNet_criteo_x4_001/README.md
create mode 100644 ranking/ctr/FinalNet/FinalNet_criteo_x4_001/environments.txt
create mode 100644 ranking/ctr/FinalNet/FinalNet_criteo_x4_001/results.csv
create mode 100644 ranking/ctr/MaskNet/MaskNet_avazu_x4_001/MaskNet_avazu_x4_001_019_541571c0.log
create mode 100644 ranking/ctr/MaskNet/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03.csv
create mode 100644 ranking/ctr/MaskNet/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03.yaml
create mode 100644 ranking/ctr/MaskNet/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03/dataset_config.yaml
create mode 100644 ranking/ctr/MaskNet/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03/model_config.yaml
create mode 100644 ranking/ctr/MaskNet/MaskNet_avazu_x4_001/README.md
create mode 100644 ranking/ctr/MaskNet/MaskNet_avazu_x4_001/environments.txt
create mode 100644 ranking/ctr/MaskNet/MaskNet_avazu_x4_001/results.csv
create mode 100644 ranking/ctr/MaskNet/MaskNet_criteo_x4_001/MaskNet_criteo_x4_001_018_ccc857cd.log
create mode 100644 ranking/ctr/MaskNet/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06.csv
create mode 100644 ranking/ctr/MaskNet/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06.yaml
create mode 100644 ranking/ctr/MaskNet/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06/dataset_config.yaml
create mode 100644 ranking/ctr/MaskNet/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06/model_config.yaml
create mode 100644 ranking/ctr/MaskNet/MaskNet_criteo_x4_001/README.md
create mode 100644 ranking/ctr/MaskNet/MaskNet_criteo_x4_001/environments.txt
create mode 100644 ranking/ctr/MaskNet/MaskNet_criteo_x4_001/results.csv
create mode 100644 scripts/revise_readme_md.py
diff --git a/docs/CTR/index.md b/docs/CTR/index.md
index dbcc5ebf..fdb7e5f2 100644
--- a/docs/CTR/index.md
+++ b/docs/CTR/index.md
@@ -1,4 +1,4 @@
-# BARS-CTR Benchmark
+# Benchmark Overview
BARS-CTR: An Open Benchmark for CTR Prediction https://openbenchmark.github.io/BARS/CTR
diff --git a/docs/CTR/leaderboard/avazu_x4_001.csv b/docs/CTR/leaderboard/avazu_x4_001.csv
index 408376b5..499670bd 100644
--- a/docs/CTR/leaderboard/avazu_x4_001.csv
+++ b/docs/CTR/leaderboard/avazu_x4_001.csv
@@ -29,3 +29,7 @@ Year,Publication,Model,Paper URL,AUC,Logloss,Running Steps,Contributor
2020,AAAI'20,AFN+,https://ojs.aaai.org/index.php/AAAI/article/view/5768,0.7929,0.3726,https://github.com/reczoo/BARS/tree/main/ranking/ctr/AFN/AFN+_avazu_x4_001,"Zhu et al."
2020,WSDM'20,InterHAt,https://dl.acm.org/doi/10.1145/3336191.3371785,0.7882,0.3749,https://github.com/reczoo/BARS/tree/main/ranking/ctr/InterHAt/InterHAt_avazu_x4_001,"Zhu et al."
2020,DLP-KDD'20,FLEN,https://arxiv.org/abs/1911.04690,0.7929,0.372,https://github.com/reczoo/BARS/tree/main/ranking/ctr/FLEN/FLEN_avazu_x4_001,"Zhu et al."
+2021,WWW'21,DCN-V2,https://arxiv.org/abs/2008.13535,0.793146,0.371865,https://github.com/reczoo/BARS/tree/main/ranking/ctr/DCNv2/DCNv2_avazu_x4_001,"Zhu et al."
+2021,DLP-KDD'21,MaskNet,https://arxiv.org/abs/2102.07619,0.794382,0.371189,https://github.com/reczoo/BARS/tree/main/ranking/ctr/MaskNet/MaskNet_avazu_x4_001,"Zhu et al."
+2023,AAAI'23,FinalMLP,https://arxiv.org/abs/2304.00902,0.793537,0.371862,https://github.com/reczoo/BARS/tree/main/ranking/ctr/FinalMLP/FinalMLP_avazu_x4_001,"Zhu et al."
+2023,SIGIR'23,FinalNet,https://dl.acm.org/doi/10.1145/3539618.3591988,0.794116,0.371254,https://github.com/reczoo/BARS/tree/main/ranking/ctr/FinalNet/FinalNet_avazu_x4_001,"Zhu et al."
diff --git a/docs/CTR/leaderboard/criteo_x4_001.csv b/docs/CTR/leaderboard/criteo_x4_001.csv
index 1bcb2b5a..8b4a2f9d 100644
--- a/docs/CTR/leaderboard/criteo_x4_001.csv
+++ b/docs/CTR/leaderboard/criteo_x4_001.csv
@@ -28,3 +28,7 @@ Year,Publication,Model,Paper URL,AUC,Logloss,Running Steps,Contributor
2020,AAAI'20,AFN,https://ojs.aaai.org/index.php/AAAI/article/view/5768,0.8115,0.4402,https://github.com/reczoo/BARS/tree/main/ranking/ctr/AFN/AFN_criteo_x4_001,"Zhu et al."
2020,AAAI'20,AFN+,https://ojs.aaai.org/index.php/AAAI/article/view/5768,0.8138,0.4384,https://github.com/reczoo/BARS/tree/main/ranking/ctr/AFN/AFN+_criteo_x4_001,"Zhu et al."
2020,WSDM'20,InterHAt,https://dl.acm.org/doi/10.1145/3336191.3371785,0.8104,0.4414,https://github.com/reczoo/BARS/tree/main/ranking/ctr/InterHAt/InterHAt_criteo_x4_001,"Zhu et al."
+2021,WWW'21,DCN-V2,https://arxiv.org/abs/2008.13535,0.814514,0.437631,https://github.com/reczoo/BARS/tree/main/ranking/ctr/DCNv2/DCNv2_criteo_x4_001,"Zhu et al."
+2021,DLP-KDD'21,MaskNet,https://arxiv.org/abs/2102.07619,0.81342,0.438748,https://github.com/reczoo/BARS/tree/main/ranking/ctr/MaskNet/MaskNet_criteo_x4_001,"Zhu et al."
+2023,AAAI'23,FinalMLP,https://arxiv.org/abs/2304.00902,0.814584,0.437353,https://github.com/reczoo/BARS/tree/main/ranking/ctr/FinalMLP/FinalMLP_criteo_x4_001,"Zhu et al."
+2023,SIGIR'23,FinalNet,https://dl.acm.org/doi/10.1145/3539618.3591988,0.814966,0.437116,https://github.com/reczoo/BARS/tree/main/ranking/ctr/FinalNet/FinalNet_criteo_x4_001,"Zhu et al."
diff --git a/docs/CTR/leaderboard/index.md b/docs/CTR/leaderboard/index.md
index d63ac8bb..7830ddea 100644
--- a/docs/CTR/leaderboard/index.md
+++ b/docs/CTR/leaderboard/index.md
@@ -1,4 +1,4 @@
-# Benchmark Leaderboard
+# BARS-CTR Leaderboard
```{tableofcontents}
```
diff --git a/docs/CTR/leaderboard/seqctr.md b/docs/CTR/leaderboard/seqctr.md
new file mode 100644
index 00000000..607bf299
--- /dev/null
+++ b/docs/CTR/leaderboard/seqctr.md
@@ -0,0 +1,4 @@
+# BARS-SeqCTR Leaderboard
+
+```{tableofcontents}
+```
diff --git a/docs/Matching/index.md b/docs/Matching/index.md
index 1a8da37f..4b573534 100644
--- a/docs/Matching/index.md
+++ b/docs/Matching/index.md
@@ -1,4 +1,4 @@
-# BARS-Match Overview
+# Benchmark Overview
BARS-Match: An Open Benchmark for Candidate Item Matching https://openbenchmark.github.io/BARS/Matching
diff --git a/docs/Matching/leaderboard/index.md b/docs/Matching/leaderboard/index.md
index 8e1009e6..220de16a 100644
--- a/docs/Matching/leaderboard/index.md
+++ b/docs/Matching/leaderboard/index.md
@@ -1,4 +1,4 @@
-# Benchmark Leaderboard
+# BARS-Match Leaderboard
```{tableofcontents}
diff --git a/docs/_toc.yml b/docs/_toc.yml
index 59171823..bbceda45 100644
--- a/docs/_toc.yml
+++ b/docs/_toc.yml
@@ -25,10 +25,12 @@ parts:
- file: CTR/leaderboard/frappe_x1.md
- file: CTR/leaderboard/movielenslatest_x1.md
- file: CTR/leaderboard/kkbox_x1.md
+ - file: CTR/leaderboard/seqctr.md
+ sections:
- file: CTR/leaderboard/amazonelectronics_x1.md
- - file: CTR/leaderboard/taobaoad_x1.md
- file: CTR/leaderboard/kuaivideo_x1.md
- file: CTR/leaderboard/microvideo1.7m_x1.md
+ - file: CTR/leaderboard/taobaoad_x1.md
- caption: "CANDIDATE MATCHING"
chapters:
diff --git a/ranking/ctr/AFM/AFM_avazu_x1/README.md b/ranking/ctr/AFM/AFM_avazu_x1/README.md
index 4d108dc0..482b5c32 100644
--- a/ranking/ctr/AFM/AFM_avazu_x1/README.md
+++ b/ranking/ctr/AFM/AFM_avazu_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AFM model on the Avazu_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x1](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [AFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/AFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [AFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/AFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AFM/AFM_avazu_x4_001/README.md b/ranking/ctr/AFM/AFM_avazu_x4_001/README.md
index 66416e90..50553508 100644
--- a/ranking/ctr/AFM/AFM_avazu_x4_001/README.md
+++ b/ranking/ctr/AFM/AFM_avazu_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AFM model on the Avazu_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AFM/AFM_avazu_x4_002/README.md b/ranking/ctr/AFM/AFM_avazu_x4_002/README.md
index e4932a34..3d14f24a 100644
--- a/ranking/ctr/AFM/AFM_avazu_x4_002/README.md
+++ b/ranking/ctr/AFM/AFM_avazu_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AFM model on the Avazu_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AFM/AFM_criteo_x1/README.md b/ranking/ctr/AFM/AFM_criteo_x1/README.md
index 8fb64be6..13a54d47 100644
--- a/ranking/ctr/AFM/AFM_criteo_x1/README.md
+++ b/ranking/ctr/AFM/AFM_criteo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AFM model on the Criteo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x1](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [AFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/AFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [AFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/AFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AFM/AFM_criteo_x4_001/README.md b/ranking/ctr/AFM/AFM_criteo_x4_001/README.md
index 94603649..5468243b 100644
--- a/ranking/ctr/AFM/AFM_criteo_x4_001/README.md
+++ b/ranking/ctr/AFM/AFM_criteo_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AFM model on the Criteo_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AFM/AFM_criteo_x4_002/README.md b/ranking/ctr/AFM/AFM_criteo_x4_002/README.md
index 33580948..3d90c041 100644
--- a/ranking/ctr/AFM/AFM_criteo_x4_002/README.md
+++ b/ranking/ctr/AFM/AFM_criteo_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AFM model on the Criteo_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AFM/AFM_frappe_x1/README.md b/ranking/ctr/AFM/AFM_frappe_x1/README.md
index b72f44e2..b9c62a81 100644
--- a/ranking/ctr/AFM/AFM_frappe_x1/README.md
+++ b/ranking/ctr/AFM/AFM_frappe_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AFM model on the Frappe_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Frappe_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Frappe/README.md#Frappe_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Frappe_x1](https://github.com/reczoo/Datasets/tree/main/Frappe/Frappe_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [AFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/AFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [AFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/AFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AFM/AFM_kkbox_x1/README.md b/ranking/ctr/AFM/AFM_kkbox_x1/README.md
index aac197d2..63b93ae8 100644
--- a/ranking/ctr/AFM/AFM_kkbox_x1/README.md
+++ b/ranking/ctr/AFM/AFM_kkbox_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AFM model on the KKBox_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [KKBox_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/KKBox/README.md#KKBox_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [KKBox_x1](https://github.com/reczoo/Datasets/tree/main/KKBox/KKBox_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AFM/AFM_movielenslatest_x1/README.md b/ranking/ctr/AFM/AFM_movielenslatest_x1/README.md
index 6ff0722d..81c064ea 100644
--- a/ranking/ctr/AFM/AFM_movielenslatest_x1/README.md
+++ b/ranking/ctr/AFM/AFM_movielenslatest_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AFM model on the Movielenslatest_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Movielenslatest_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/MovieLens/README.md#Movielenslatest_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [MovielensLatest_x1](https://github.com/reczoo/Datasets/tree/main/MovieLens/MovielensLatest_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [AFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/AFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [AFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/AFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AFN/AFN+_avazu_x1/README.md b/ranking/ctr/AFN/AFN+_avazu_x1/README.md
index 267c26a5..6ac7a1b2 100644
--- a/ranking/ctr/AFN/AFN+_avazu_x1/README.md
+++ b/ranking/ctr/AFN/AFN+_avazu_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AFN model on the Avazu_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x1](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [AFN](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/AFN.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [AFN](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/AFN.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AFN/AFN+_avazu_x4_001/README.md b/ranking/ctr/AFN/AFN+_avazu_x4_001/README.md
index d6b6ab21..dff4b2db 100644
--- a/ranking/ctr/AFN/AFN+_avazu_x4_001/README.md
+++ b/ranking/ctr/AFN/AFN+_avazu_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AFN model on the Avazu_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AFN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AFN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AFN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AFN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AFN/AFN+_avazu_x4_002/README.md b/ranking/ctr/AFN/AFN+_avazu_x4_002/README.md
index affabdd2..20590274 100644
--- a/ranking/ctr/AFN/AFN+_avazu_x4_002/README.md
+++ b/ranking/ctr/AFN/AFN+_avazu_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AFN model on the Avazu_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AFN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AFN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AFN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AFN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AFN/AFN+_criteo_x1/README.md b/ranking/ctr/AFN/AFN+_criteo_x1/README.md
index 401ace72..d528e9fa 100644
--- a/ranking/ctr/AFN/AFN+_criteo_x1/README.md
+++ b/ranking/ctr/AFN/AFN+_criteo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AFN model on the Criteo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x1](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [AFN](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/AFN.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [AFN](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/AFN.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AFN/AFN+_criteo_x4_001/README.md b/ranking/ctr/AFN/AFN+_criteo_x4_001/README.md
index 8bf0c331..6047f53e 100644
--- a/ranking/ctr/AFN/AFN+_criteo_x4_001/README.md
+++ b/ranking/ctr/AFN/AFN+_criteo_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AFN model on the Criteo_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AFN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AFN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AFN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AFN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AFN/AFN+_criteo_x4_002/README.md b/ranking/ctr/AFN/AFN+_criteo_x4_002/README.md
index 1e63a818..109a581b 100644
--- a/ranking/ctr/AFN/AFN+_criteo_x4_002/README.md
+++ b/ranking/ctr/AFN/AFN+_criteo_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AFN model on the Criteo_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AFN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AFN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AFN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AFN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AFN/AFN+_frappe_x1/README.md b/ranking/ctr/AFN/AFN+_frappe_x1/README.md
index 2a3bf948..d00aba51 100644
--- a/ranking/ctr/AFN/AFN+_frappe_x1/README.md
+++ b/ranking/ctr/AFN/AFN+_frappe_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AFN model on the Frappe_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Frappe_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Frappe/README.md#Frappe_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Frappe_x1](https://github.com/reczoo/Datasets/tree/main/Frappe/Frappe_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [AFN](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/AFN.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [AFN](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/AFN.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AFN/AFN+_kkbox_x1/README.md b/ranking/ctr/AFN/AFN+_kkbox_x1/README.md
index 02917061..a22972b4 100644
--- a/ranking/ctr/AFN/AFN+_kkbox_x1/README.md
+++ b/ranking/ctr/AFN/AFN+_kkbox_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AFN model on the KKBox_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -38,11 +38,11 @@ Dataset ID: [KKBox_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pre
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AFN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AFN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AFN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AFN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AFN/AFN+_movielenslatest_x1/README.md b/ranking/ctr/AFN/AFN+_movielenslatest_x1/README.md
index 90a43bd1..13fd3d10 100644
--- a/ranking/ctr/AFN/AFN+_movielenslatest_x1/README.md
+++ b/ranking/ctr/AFN/AFN+_movielenslatest_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AFN model on the Movielenslatest_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Movielenslatest_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/MovieLens/README.md#Movielenslatest_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [MovielensLatest_x1](https://github.com/reczoo/Datasets/tree/main/MovieLens/MovielensLatest_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [AFN](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/AFN.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [AFN](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/AFN.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AFN/AFN_avazu_x1/README.md b/ranking/ctr/AFN/AFN_avazu_x1/README.md
index 263f3ae8..93aa265b 100644
--- a/ranking/ctr/AFN/AFN_avazu_x1/README.md
+++ b/ranking/ctr/AFN/AFN_avazu_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AFN model on the Avazu_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x1](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [AFN](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/AFN.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [AFN](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/AFN.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AFN/AFN_avazu_x4_001/README.md b/ranking/ctr/AFN/AFN_avazu_x4_001/README.md
index 6a51afdc..b101bdab 100644
--- a/ranking/ctr/AFN/AFN_avazu_x4_001/README.md
+++ b/ranking/ctr/AFN/AFN_avazu_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AFN model on the Avazu_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AFN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AFN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AFN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AFN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AFN/AFN_avazu_x4_002/README.md b/ranking/ctr/AFN/AFN_avazu_x4_002/README.md
index 827b0e0b..4f13fc73 100644
--- a/ranking/ctr/AFN/AFN_avazu_x4_002/README.md
+++ b/ranking/ctr/AFN/AFN_avazu_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AFN model on the Avazu_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AFN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AFN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AFN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AFN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AFN/AFN_criteo_x1/README.md b/ranking/ctr/AFN/AFN_criteo_x1/README.md
index d5d8fcfe..e74de1b6 100644
--- a/ranking/ctr/AFN/AFN_criteo_x1/README.md
+++ b/ranking/ctr/AFN/AFN_criteo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AFN model on the Criteo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x1](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [AFN](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/AFN.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [AFN](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/AFN.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AFN/AFN_criteo_x4_001/README.md b/ranking/ctr/AFN/AFN_criteo_x4_001/README.md
index 257e7c9e..021782d5 100644
--- a/ranking/ctr/AFN/AFN_criteo_x4_001/README.md
+++ b/ranking/ctr/AFN/AFN_criteo_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AFN model on the Criteo_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AFN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AFN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AFN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AFN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AFN/AFN_criteo_x4_002/README.md b/ranking/ctr/AFN/AFN_criteo_x4_002/README.md
index 345057c5..79d45f77 100644
--- a/ranking/ctr/AFN/AFN_criteo_x4_002/README.md
+++ b/ranking/ctr/AFN/AFN_criteo_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AFN model on the Criteo_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AFN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AFN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AFN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AFN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AFN/AFN_frappe_x1/README.md b/ranking/ctr/AFN/AFN_frappe_x1/README.md
index a70b307c..dbe2a5e2 100644
--- a/ranking/ctr/AFN/AFN_frappe_x1/README.md
+++ b/ranking/ctr/AFN/AFN_frappe_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AFN model on the Frappe_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Frappe_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Frappe/README.md#Frappe_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Frappe_x1](https://github.com/reczoo/Datasets/tree/main/Frappe/Frappe_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [AFN](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/AFN.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [AFN](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/AFN.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AFN/AFN_kkbox_x1/README.md b/ranking/ctr/AFN/AFN_kkbox_x1/README.md
index 282b7140..459ec281 100644
--- a/ranking/ctr/AFN/AFN_kkbox_x1/README.md
+++ b/ranking/ctr/AFN/AFN_kkbox_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AFN model on the KKBox_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -38,11 +38,11 @@ Dataset ID: [KKBox_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pre
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AFN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AFN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AFN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AFN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AFN/AFN_movielenslatest_x1/README.md b/ranking/ctr/AFN/AFN_movielenslatest_x1/README.md
index e5a62d35..c4ab84a3 100644
--- a/ranking/ctr/AFN/AFN_movielenslatest_x1/README.md
+++ b/ranking/ctr/AFN/AFN_movielenslatest_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AFN model on the Movielenslatest_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Movielenslatest_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/MovieLens/README.md#Movielenslatest_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [MovielensLatest_x1](https://github.com/reczoo/Datasets/tree/main/MovieLens/MovielensLatest_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [AFN](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/AFN.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [AFN](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/AFN.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AOANet/AOANet_amazonelectronics_x1/README.md b/ranking/ctr/AOANet/AOANet_amazonelectronics_x1/README.md
index b349d33c..29b823b3 100644
--- a/ranking/ctr/AOANet/AOANet_amazonelectronics_x1/README.md
+++ b/ranking/ctr/AOANet/AOANet_amazonelectronics_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AOANet model on the AmazonElectronics_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [AmazonElectronics_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/Amazon#AmazonElectronics_x1) to get data ready.
+Please refer to [AmazonElectronics_x1](https://github.com/reczoo/Datasets/tree/main/Amazon/AmazonElectronics_x1) to get the dataset details.
### Code
-We use the [AOANet](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/AOANet) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [AOANet](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/AOANet) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/AOANet/AOANet_avazu_x1/README.md b/ranking/ctr/AOANet/AOANet_avazu_x1/README.md
index 1d3b1d24..f2529c75 100644
--- a/ranking/ctr/AOANet/AOANet_avazu_x1/README.md
+++ b/ranking/ctr/AOANet/AOANet_avazu_x1/README.md
@@ -1,188 +1,188 @@
-## AOANet_avazu_x1
-
-A hands-on guide to run the AOANet model on the Avazu_x1 dataset.
-
-Author: [XUEPAI](https://github.com/xue-pai)
-
-### Index
-[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
-
-### Environments
-+ Hardware
-
- ```python
- CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
- GPU: Tesla V100 32G
- RAM: 755G
-
- ```
-
-+ Software
-
- ```python
- CUDA: 10.2
- python: 3.6.4
- pytorch: 1.0.0
- pandas: 0.22.0
- numpy: 1.19.2
- scipy: 1.5.4
- sklearn: 0.22.1
- pyyaml: 5.4.1
- h5py: 2.8.0
- tqdm: 4.60.0
- fuxictr: 1.2.1
-
- ```
-
-### Dataset
-Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu#Avazu_x1). Please refer to the dataset details to get data ready.
-
-### Code
-
-We use [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [AOANet](https://github.com/xue-pai/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/AOANet.py).
-
-Running steps:
-
-1. Download [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
-
- ```python
- sys.path.append('YOUR_PATH_TO_FuxiCTR/')
- ```
-
-2. Create a data directory and put the downloaded csv files in `../data/Avazu/Avazu_x1`.
-
-3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [AOANet_avazu_x1_tuner_config_01](./AOANet_avazu_x1_tuner_config_01). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
-
-4. Run the following script to start.
-
- ```bash
- cd AOANet_avazu_x1
- nohup python run_expid.py --config ./AOANet_avazu_x1_tuner_config_01 --expid AOANet_avazu_x1_004_a663f0bb --gpu 0 > run.log &
- tail -f run.log
- ```
-
-### Results
-
-| AUC | logloss |
-|:--------------------:|:--------------------:|
-| 0.765390 | 0.366352 |
-
-
-### Logs
-```python
-2022-05-31 10:44:41,754 P20282 INFO {
- "batch_norm": "True",
- "batch_size": "4096",
- "data_format": "csv",
- "data_root": "../data/Avazu/",
- "dataset_id": "avazu_x1_3fb65689",
- "debug": "False",
- "dnn_hidden_activations": "ReLU",
- "dnn_hidden_units": "[400, 400, 400]",
- "embedding_dim": "10",
- "embedding_regularizer": "0.1",
- "epochs": "100",
- "every_x_epochs": "1",
- "feature_cols": "[{'active': True, 'dtype': 'float', 'name': ['feat_1', 'feat_2', 'feat_3', 'feat_4', 'feat_5', 'feat_6', 'feat_7', 'feat_8', 'feat_9', 'feat_10', 'feat_11', 'feat_12', 'feat_13', 'feat_14', 'feat_15', 'feat_16', 'feat_17', 'feat_18', 'feat_19', 'feat_20', 'feat_21', 'feat_22'], 'type': 'categorical'}]",
- "gpu": "3",
- "label_col": "{'dtype': 'float', 'name': 'label'}",
- "learning_rate": "0.001",
- "loss": "binary_crossentropy",
- "metrics": "['AUC', 'logloss']",
- "min_categr_count": "1",
- "model": "AOANet",
- "model_id": "AOANet_avazu_x1_004_a663f0bb",
- "model_root": "./Avazu/AOANet_avazu_x1/",
- "monitor": "AUC",
- "monitor_mode": "max",
- "net_dropout": "0.2",
- "net_regularizer": "0",
- "num_interaction_layers": "1",
- "num_subspaces": "2",
- "num_workers": "3",
- "optimizer": "adam",
- "patience": "2",
- "pickle_feature_encoder": "True",
- "save_best_only": "True",
- "seed": "2021",
- "shuffle": "True",
- "task": "binary_classification",
- "test_data": "../data/Avazu/Avazu_x1/test.csv",
- "train_data": "../data/Avazu/Avazu_x1/train.csv",
- "use_hdf5": "True",
- "valid_data": "../data/Avazu/Avazu_x1/valid.csv",
- "verbose": "0",
- "version": "pytorch"
-}
-2022-05-31 10:44:41,754 P20282 INFO Set up feature encoder...
-2022-05-31 10:44:41,755 P20282 INFO Load feature_map from json: ../data/Avazu/avazu_x1_3fb65689/feature_map.json
-2022-05-31 10:44:41,755 P20282 INFO Loading data...
-2022-05-31 10:44:41,756 P20282 INFO Loading data from h5: ../data/Avazu/avazu_x1_3fb65689/train.h5
-2022-05-31 10:44:44,227 P20282 INFO Loading data from h5: ../data/Avazu/avazu_x1_3fb65689/valid.h5
-2022-05-31 10:44:44,604 P20282 INFO Train samples: total/28300276, pos/4953382, neg/23346894, ratio/17.50%, blocks/1
-2022-05-31 10:44:44,604 P20282 INFO Validation samples: total/4042897, pos/678699, neg/3364198, ratio/16.79%, blocks/1
-2022-05-31 10:44:44,604 P20282 INFO Loading train data done.
-2022-05-31 10:44:50,693 P20282 INFO Total number of parameters: 13399199.
-2022-05-31 10:44:50,694 P20282 INFO Start training: 6910 batches/epoch
-2022-05-31 10:44:50,694 P20282 INFO ************ Epoch=1 start ************
-2022-05-31 11:04:18,922 P20282 INFO [Metrics] AUC: 0.731240 - logloss: 0.404159
-2022-05-31 11:04:18,924 P20282 INFO Save best model: monitor(max): 0.731240
-2022-05-31 11:04:19,187 P20282 INFO --- 6910/6910 batches finished ---
-2022-05-31 11:04:19,231 P20282 INFO Train loss: 0.446839
-2022-05-31 11:04:19,231 P20282 INFO ************ Epoch=1 end ************
-2022-05-31 11:23:43,789 P20282 INFO [Metrics] AUC: 0.736532 - logloss: 0.402076
-2022-05-31 11:23:43,792 P20282 INFO Save best model: monitor(max): 0.736532
-2022-05-31 11:23:43,862 P20282 INFO --- 6910/6910 batches finished ---
-2022-05-31 11:23:43,901 P20282 INFO Train loss: 0.443322
-2022-05-31 11:23:43,901 P20282 INFO ************ Epoch=2 end ************
-2022-05-31 11:43:06,150 P20282 INFO [Metrics] AUC: 0.734590 - logloss: 0.401921
-2022-05-31 11:43:06,152 P20282 INFO Monitor(max) STOP: 0.734590 !
-2022-05-31 11:43:06,152 P20282 INFO Reduce learning rate on plateau: 0.000100
-2022-05-31 11:43:06,152 P20282 INFO --- 6910/6910 batches finished ---
-2022-05-31 11:43:06,197 P20282 INFO Train loss: 0.442913
-2022-05-31 11:43:06,198 P20282 INFO ************ Epoch=3 end ************
-2022-05-31 12:02:24,338 P20282 INFO [Metrics] AUC: 0.745001 - logloss: 0.397410
-2022-05-31 12:02:24,340 P20282 INFO Save best model: monitor(max): 0.745001
-2022-05-31 12:02:24,404 P20282 INFO --- 6910/6910 batches finished ---
-2022-05-31 12:02:24,452 P20282 INFO Train loss: 0.411588
-2022-05-31 12:02:24,452 P20282 INFO ************ Epoch=4 end ************
-2022-05-31 12:21:41,677 P20282 INFO [Metrics] AUC: 0.745250 - logloss: 0.396864
-2022-05-31 12:21:41,680 P20282 INFO Save best model: monitor(max): 0.745250
-2022-05-31 12:21:41,754 P20282 INFO --- 6910/6910 batches finished ---
-2022-05-31 12:21:41,795 P20282 INFO Train loss: 0.413135
-2022-05-31 12:21:41,795 P20282 INFO ************ Epoch=5 end ************
-2022-05-31 12:40:57,943 P20282 INFO [Metrics] AUC: 0.742879 - logloss: 0.398163
-2022-05-31 12:40:57,945 P20282 INFO Monitor(max) STOP: 0.742879 !
-2022-05-31 12:40:57,945 P20282 INFO Reduce learning rate on plateau: 0.000010
-2022-05-31 12:40:57,945 P20282 INFO --- 6910/6910 batches finished ---
-2022-05-31 12:40:57,992 P20282 INFO Train loss: 0.414100
-2022-05-31 12:40:57,992 P20282 INFO ************ Epoch=6 end ************
-2022-05-31 13:00:13,211 P20282 INFO [Metrics] AUC: 0.747211 - logloss: 0.396400
-2022-05-31 13:00:13,213 P20282 INFO Save best model: monitor(max): 0.747211
-2022-05-31 13:00:13,278 P20282 INFO --- 6910/6910 batches finished ---
-2022-05-31 13:00:13,322 P20282 INFO Train loss: 0.398612
-2022-05-31 13:00:13,322 P20282 INFO ************ Epoch=7 end ************
-2022-05-31 13:19:29,209 P20282 INFO [Metrics] AUC: 0.744972 - logloss: 0.397386
-2022-05-31 13:19:29,211 P20282 INFO Monitor(max) STOP: 0.744972 !
-2022-05-31 13:19:29,211 P20282 INFO Reduce learning rate on plateau: 0.000001
-2022-05-31 13:19:29,211 P20282 INFO --- 6910/6910 batches finished ---
-2022-05-31 13:19:29,258 P20282 INFO Train loss: 0.397166
-2022-05-31 13:19:29,258 P20282 INFO ************ Epoch=8 end ************
-2022-05-31 13:38:41,141 P20282 INFO [Metrics] AUC: 0.742032 - logloss: 0.398859
-2022-05-31 13:38:41,144 P20282 INFO Monitor(max) STOP: 0.742032 !
-2022-05-31 13:38:41,144 P20282 INFO Reduce learning rate on plateau: 0.000001
-2022-05-31 13:38:41,144 P20282 INFO Early stopping at epoch=9
-2022-05-31 13:38:41,144 P20282 INFO --- 6910/6910 batches finished ---
-2022-05-31 13:38:41,191 P20282 INFO Train loss: 0.390920
-2022-05-31 13:38:41,191 P20282 INFO Training finished.
-2022-05-31 13:38:41,191 P20282 INFO Load best model: /cache/FuxiCTR/benchmarks/Avazu/AOANet_avazu_x1/avazu_x1_3fb65689/AOANet_avazu_x1_004_a663f0bb.model
-2022-05-31 13:38:48,274 P20282 INFO ****** Validation evaluation ******
-2022-05-31 13:39:14,341 P20282 INFO [Metrics] AUC: 0.747211 - logloss: 0.396400
-2022-05-31 13:39:14,419 P20282 INFO ******** Test evaluation ********
-2022-05-31 13:39:14,419 P20282 INFO Loading data...
-2022-05-31 13:39:14,420 P20282 INFO Loading data from h5: ../data/Avazu/avazu_x1_3fb65689/test.h5
-2022-05-31 13:39:15,250 P20282 INFO Test samples: total/8085794, pos/1232985, neg/6852809, ratio/15.25%, blocks/1
-2022-05-31 13:39:15,251 P20282 INFO Loading test data done.
-2022-05-31 13:40:05,573 P20282 INFO [Metrics] AUC: 0.765390 - logloss: 0.366352
-
-```
+## AOANet_avazu_x1
+
+A hands-on guide to run the AOANet model on the Avazu_x1 dataset.
+
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
+
+### Index
+[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
+
+### Environments
++ Hardware
+
+ ```python
+ CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
+ GPU: Tesla V100 32G
+ RAM: 755G
+
+ ```
+
++ Software
+
+ ```python
+ CUDA: 10.2
+ python: 3.6.4
+ pytorch: 1.0.0
+ pandas: 0.22.0
+ numpy: 1.19.2
+ scipy: 1.5.4
+ sklearn: 0.22.1
+ pyyaml: 5.4.1
+ h5py: 2.8.0
+ tqdm: 4.60.0
+ fuxictr: 1.2.1
+
+ ```
+
+### Dataset
+Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu#Avazu_x1). Please refer to the dataset details to get data ready.
+
+### Code
+
+We use [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [AOANet](https://github.com/reczoo/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/AOANet.py).
+
+Running steps:
+
+1. Download [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+
+ ```python
+ sys.path.append('YOUR_PATH_TO_FuxiCTR/')
+ ```
+
+2. Create a data directory and put the downloaded csv files in `../data/Avazu/Avazu_x1`.
+
+3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [AOANet_avazu_x1_tuner_config_01](./AOANet_avazu_x1_tuner_config_01). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
+
+4. Run the following script to start.
+
+ ```bash
+ cd AOANet_avazu_x1
+ nohup python run_expid.py --config ./AOANet_avazu_x1_tuner_config_01 --expid AOANet_avazu_x1_004_a663f0bb --gpu 0 > run.log &
+ tail -f run.log
+ ```
+
+### Results
+
+| AUC | logloss |
+|:--------------------:|:--------------------:|
+| 0.765390 | 0.366352 |
+
+
+### Logs
+```python
+2022-05-31 10:44:41,754 P20282 INFO {
+ "batch_norm": "True",
+ "batch_size": "4096",
+ "data_format": "csv",
+ "data_root": "../data/Avazu/",
+ "dataset_id": "avazu_x1_3fb65689",
+ "debug": "False",
+ "dnn_hidden_activations": "ReLU",
+ "dnn_hidden_units": "[400, 400, 400]",
+ "embedding_dim": "10",
+ "embedding_regularizer": "0.1",
+ "epochs": "100",
+ "every_x_epochs": "1",
+ "feature_cols": "[{'active': True, 'dtype': 'float', 'name': ['feat_1', 'feat_2', 'feat_3', 'feat_4', 'feat_5', 'feat_6', 'feat_7', 'feat_8', 'feat_9', 'feat_10', 'feat_11', 'feat_12', 'feat_13', 'feat_14', 'feat_15', 'feat_16', 'feat_17', 'feat_18', 'feat_19', 'feat_20', 'feat_21', 'feat_22'], 'type': 'categorical'}]",
+ "gpu": "3",
+ "label_col": "{'dtype': 'float', 'name': 'label'}",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "metrics": "['AUC', 'logloss']",
+ "min_categr_count": "1",
+ "model": "AOANet",
+ "model_id": "AOANet_avazu_x1_004_a663f0bb",
+ "model_root": "./Avazu/AOANet_avazu_x1/",
+ "monitor": "AUC",
+ "monitor_mode": "max",
+ "net_dropout": "0.2",
+ "net_regularizer": "0",
+ "num_interaction_layers": "1",
+ "num_subspaces": "2",
+ "num_workers": "3",
+ "optimizer": "adam",
+ "patience": "2",
+ "pickle_feature_encoder": "True",
+ "save_best_only": "True",
+ "seed": "2021",
+ "shuffle": "True",
+ "task": "binary_classification",
+ "test_data": "../data/Avazu/Avazu_x1/test.csv",
+ "train_data": "../data/Avazu/Avazu_x1/train.csv",
+ "use_hdf5": "True",
+ "valid_data": "../data/Avazu/Avazu_x1/valid.csv",
+ "verbose": "0",
+ "version": "pytorch"
+}
+2022-05-31 10:44:41,754 P20282 INFO Set up feature encoder...
+2022-05-31 10:44:41,755 P20282 INFO Load feature_map from json: ../data/Avazu/avazu_x1_3fb65689/feature_map.json
+2022-05-31 10:44:41,755 P20282 INFO Loading data...
+2022-05-31 10:44:41,756 P20282 INFO Loading data from h5: ../data/Avazu/avazu_x1_3fb65689/train.h5
+2022-05-31 10:44:44,227 P20282 INFO Loading data from h5: ../data/Avazu/avazu_x1_3fb65689/valid.h5
+2022-05-31 10:44:44,604 P20282 INFO Train samples: total/28300276, pos/4953382, neg/23346894, ratio/17.50%, blocks/1
+2022-05-31 10:44:44,604 P20282 INFO Validation samples: total/4042897, pos/678699, neg/3364198, ratio/16.79%, blocks/1
+2022-05-31 10:44:44,604 P20282 INFO Loading train data done.
+2022-05-31 10:44:50,693 P20282 INFO Total number of parameters: 13399199.
+2022-05-31 10:44:50,694 P20282 INFO Start training: 6910 batches/epoch
+2022-05-31 10:44:50,694 P20282 INFO ************ Epoch=1 start ************
+2022-05-31 11:04:18,922 P20282 INFO [Metrics] AUC: 0.731240 - logloss: 0.404159
+2022-05-31 11:04:18,924 P20282 INFO Save best model: monitor(max): 0.731240
+2022-05-31 11:04:19,187 P20282 INFO --- 6910/6910 batches finished ---
+2022-05-31 11:04:19,231 P20282 INFO Train loss: 0.446839
+2022-05-31 11:04:19,231 P20282 INFO ************ Epoch=1 end ************
+2022-05-31 11:23:43,789 P20282 INFO [Metrics] AUC: 0.736532 - logloss: 0.402076
+2022-05-31 11:23:43,792 P20282 INFO Save best model: monitor(max): 0.736532
+2022-05-31 11:23:43,862 P20282 INFO --- 6910/6910 batches finished ---
+2022-05-31 11:23:43,901 P20282 INFO Train loss: 0.443322
+2022-05-31 11:23:43,901 P20282 INFO ************ Epoch=2 end ************
+2022-05-31 11:43:06,150 P20282 INFO [Metrics] AUC: 0.734590 - logloss: 0.401921
+2022-05-31 11:43:06,152 P20282 INFO Monitor(max) STOP: 0.734590 !
+2022-05-31 11:43:06,152 P20282 INFO Reduce learning rate on plateau: 0.000100
+2022-05-31 11:43:06,152 P20282 INFO --- 6910/6910 batches finished ---
+2022-05-31 11:43:06,197 P20282 INFO Train loss: 0.442913
+2022-05-31 11:43:06,198 P20282 INFO ************ Epoch=3 end ************
+2022-05-31 12:02:24,338 P20282 INFO [Metrics] AUC: 0.745001 - logloss: 0.397410
+2022-05-31 12:02:24,340 P20282 INFO Save best model: monitor(max): 0.745001
+2022-05-31 12:02:24,404 P20282 INFO --- 6910/6910 batches finished ---
+2022-05-31 12:02:24,452 P20282 INFO Train loss: 0.411588
+2022-05-31 12:02:24,452 P20282 INFO ************ Epoch=4 end ************
+2022-05-31 12:21:41,677 P20282 INFO [Metrics] AUC: 0.745250 - logloss: 0.396864
+2022-05-31 12:21:41,680 P20282 INFO Save best model: monitor(max): 0.745250
+2022-05-31 12:21:41,754 P20282 INFO --- 6910/6910 batches finished ---
+2022-05-31 12:21:41,795 P20282 INFO Train loss: 0.413135
+2022-05-31 12:21:41,795 P20282 INFO ************ Epoch=5 end ************
+2022-05-31 12:40:57,943 P20282 INFO [Metrics] AUC: 0.742879 - logloss: 0.398163
+2022-05-31 12:40:57,945 P20282 INFO Monitor(max) STOP: 0.742879 !
+2022-05-31 12:40:57,945 P20282 INFO Reduce learning rate on plateau: 0.000010
+2022-05-31 12:40:57,945 P20282 INFO --- 6910/6910 batches finished ---
+2022-05-31 12:40:57,992 P20282 INFO Train loss: 0.414100
+2022-05-31 12:40:57,992 P20282 INFO ************ Epoch=6 end ************
+2022-05-31 13:00:13,211 P20282 INFO [Metrics] AUC: 0.747211 - logloss: 0.396400
+2022-05-31 13:00:13,213 P20282 INFO Save best model: monitor(max): 0.747211
+2022-05-31 13:00:13,278 P20282 INFO --- 6910/6910 batches finished ---
+2022-05-31 13:00:13,322 P20282 INFO Train loss: 0.398612
+2022-05-31 13:00:13,322 P20282 INFO ************ Epoch=7 end ************
+2022-05-31 13:19:29,209 P20282 INFO [Metrics] AUC: 0.744972 - logloss: 0.397386
+2022-05-31 13:19:29,211 P20282 INFO Monitor(max) STOP: 0.744972 !
+2022-05-31 13:19:29,211 P20282 INFO Reduce learning rate on plateau: 0.000001
+2022-05-31 13:19:29,211 P20282 INFO --- 6910/6910 batches finished ---
+2022-05-31 13:19:29,258 P20282 INFO Train loss: 0.397166
+2022-05-31 13:19:29,258 P20282 INFO ************ Epoch=8 end ************
+2022-05-31 13:38:41,141 P20282 INFO [Metrics] AUC: 0.742032 - logloss: 0.398859
+2022-05-31 13:38:41,144 P20282 INFO Monitor(max) STOP: 0.742032 !
+2022-05-31 13:38:41,144 P20282 INFO Reduce learning rate on plateau: 0.000001
+2022-05-31 13:38:41,144 P20282 INFO Early stopping at epoch=9
+2022-05-31 13:38:41,144 P20282 INFO --- 6910/6910 batches finished ---
+2022-05-31 13:38:41,191 P20282 INFO Train loss: 0.390920
+2022-05-31 13:38:41,191 P20282 INFO Training finished.
+2022-05-31 13:38:41,191 P20282 INFO Load best model: /cache/FuxiCTR/benchmarks/Avazu/AOANet_avazu_x1/avazu_x1_3fb65689/AOANet_avazu_x1_004_a663f0bb.model
+2022-05-31 13:38:48,274 P20282 INFO ****** Validation evaluation ******
+2022-05-31 13:39:14,341 P20282 INFO [Metrics] AUC: 0.747211 - logloss: 0.396400
+2022-05-31 13:39:14,419 P20282 INFO ******** Test evaluation ********
+2022-05-31 13:39:14,419 P20282 INFO Loading data...
+2022-05-31 13:39:14,420 P20282 INFO Loading data from h5: ../data/Avazu/avazu_x1_3fb65689/test.h5
+2022-05-31 13:39:15,250 P20282 INFO Test samples: total/8085794, pos/1232985, neg/6852809, ratio/15.25%, blocks/1
+2022-05-31 13:39:15,251 P20282 INFO Loading test data done.
+2022-05-31 13:40:05,573 P20282 INFO [Metrics] AUC: 0.765390 - logloss: 0.366352
+
+```
diff --git a/ranking/ctr/AOANet/AOANet_criteo_x1/README.md b/ranking/ctr/AOANet/AOANet_criteo_x1/README.md
index 5a6d3750..ce775002 100644
--- a/ranking/ctr/AOANet/AOANet_criteo_x1/README.md
+++ b/ranking/ctr/AOANet/AOANet_criteo_x1/README.md
@@ -1,237 +1,237 @@
-## AOANet_criteo_x1
-
-A hands-on guide to run the AOANet model on the Criteo_x1 dataset.
-
-Author: [XUEPAI](https://github.com/xue-pai)
-
-### Index
-[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
-
-### Environments
-+ Hardware
-
- ```python
- CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
- GPU: Tesla V100 32G
- RAM: 755G
-
- ```
-
-+ Software
-
- ```python
- CUDA: 10.2
- python: 3.6.4
- pytorch: 1.0.0
- pandas: 0.22.0
- numpy: 1.19.2
- scipy: 1.5.4
- sklearn: 0.22.1
- pyyaml: 5.4.1
- h5py: 2.8.0
- tqdm: 4.60.0
- fuxictr: 1.2.1
-
- ```
-
-### Dataset
-Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo#Criteo_x1). Please refer to the dataset details to get data ready.
-
-### Code
-
-We use [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [AOANet](https://github.com/xue-pai/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/AOANet.py).
-
-Running steps:
-
-1. Download [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
-
- ```python
- sys.path.append('YOUR_PATH_TO_FuxiCTR/')
- ```
-
-2. Create a data directory and put the downloaded csv files in `../data/Criteo/Criteo_x1`.
-
-3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [AOANet_criteo_x1_tuner_config_03](./AOANet_criteo_x1_tuner_config_03). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
-
-4. Run the following script to start.
-
- ```bash
- cd AOANet_criteo_x1
- nohup python run_expid.py --config ./AOANet_criteo_x1_tuner_config_03 --expid AOANet_criteo_x1_005_faa15d7f --gpu 0 > run.log &
- tail -f run.log
- ```
-
-### Results
-
-| AUC | logloss |
-|:--------------------:|:--------------------:|
-| 0.814127 | 0.437825 |
-
-
-### Logs
-```python
-2022-06-01 14:01:59,484 P56037 INFO {
- "batch_norm": "True",
- "batch_size": "4096",
- "data_format": "csv",
- "data_root": "../data/Criteo/",
- "dataset_id": "criteo_x1_7b681156",
- "debug": "False",
- "dnn_hidden_activations": "ReLU",
- "dnn_hidden_units": "[400, 400, 400]",
- "embedding_dim": "10",
- "embedding_regularizer": "1e-05",
- "epochs": "100",
- "every_x_epochs": "1",
- "feature_cols": "[{'active': True, 'dtype': 'float', 'name': ['I1', 'I2', 'I3', 'I4', 'I5', 'I6', 'I7', 'I8', 'I9', 'I10', 'I11', 'I12', 'I13'], 'type': 'numeric'}, {'active': True, 'dtype': 'float', 'name': ['C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10', 'C11', 'C12', 'C13', 'C14', 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21', 'C22', 'C23', 'C24', 'C25', 'C26'], 'type': 'categorical'}]",
- "gpu": "4",
- "label_col": "{'dtype': 'float', 'name': 'label'}",
- "learning_rate": "0.001",
- "loss": "binary_crossentropy",
- "metrics": "['AUC', 'logloss']",
- "min_categr_count": "1",
- "model": "AOANet",
- "model_id": "AOANet_criteo_x1_005_faa15d7f",
- "model_root": "./Criteo/AOANet_criteo_x1/",
- "monitor": "AUC",
- "monitor_mode": "max",
- "net_dropout": "0.1",
- "net_regularizer": "0",
- "num_interaction_layers": "1",
- "num_subspaces": "8",
- "num_workers": "3",
- "optimizer": "adam",
- "patience": "2",
- "pickle_feature_encoder": "True",
- "save_best_only": "True",
- "seed": "2021",
- "shuffle": "True",
- "task": "binary_classification",
- "test_data": "../data/Criteo/Criteo_x1/test.csv",
- "train_data": "../data/Criteo/Criteo_x1/train.csv",
- "use_hdf5": "True",
- "valid_data": "../data/Criteo/Criteo_x1/valid.csv",
- "verbose": "0",
- "version": "pytorch"
-}
-2022-06-01 14:01:59,484 P56037 INFO Set up feature encoder...
-2022-06-01 14:01:59,484 P56037 INFO Load feature_map from json: ../data/Criteo/criteo_x1_7b681156/feature_map.json
-2022-06-01 14:01:59,485 P56037 INFO Loading data...
-2022-06-01 14:01:59,485 P56037 INFO Loading data from h5: ../data/Criteo/criteo_x1_7b681156/train.h5
-2022-06-01 14:02:04,219 P56037 INFO Loading data from h5: ../data/Criteo/criteo_x1_7b681156/valid.h5
-2022-06-01 14:02:05,389 P56037 INFO Train samples: total/33003326, pos/8456369, neg/24546957, ratio/25.62%, blocks/1
-2022-06-01 14:02:05,389 P56037 INFO Validation samples: total/8250124, pos/2114300, neg/6135824, ratio/25.63%, blocks/1
-2022-06-01 14:02:05,389 P56037 INFO Loading train data done.
-2022-06-01 14:02:09,801 P56037 INFO Total number of parameters: 21356289.
-2022-06-01 14:02:09,802 P56037 INFO Start training: 8058 batches/epoch
-2022-06-01 14:02:09,802 P56037 INFO ************ Epoch=1 start ************
-2022-06-01 14:22:25,613 P56037 INFO [Metrics] AUC: 0.804139 - logloss: 0.447060
-2022-06-01 14:22:25,615 P56037 INFO Save best model: monitor(max): 0.804139
-2022-06-01 14:22:25,859 P56037 INFO --- 8058/8058 batches finished ---
-2022-06-01 14:22:25,898 P56037 INFO Train loss: 0.461745
-2022-06-01 14:22:25,899 P56037 INFO ************ Epoch=1 end ************
-2022-06-01 14:42:40,739 P56037 INFO [Metrics] AUC: 0.806324 - logloss: 0.445000
-2022-06-01 14:42:40,740 P56037 INFO Save best model: monitor(max): 0.806324
-2022-06-01 14:42:40,852 P56037 INFO --- 8058/8058 batches finished ---
-2022-06-01 14:42:40,897 P56037 INFO Train loss: 0.455928
-2022-06-01 14:42:40,897 P56037 INFO ************ Epoch=2 end ************
-2022-06-01 15:02:54,868 P56037 INFO [Metrics] AUC: 0.807531 - logloss: 0.443970
-2022-06-01 15:02:54,869 P56037 INFO Save best model: monitor(max): 0.807531
-2022-06-01 15:02:54,967 P56037 INFO --- 8058/8058 batches finished ---
-2022-06-01 15:02:55,010 P56037 INFO Train loss: 0.454417
-2022-06-01 15:02:55,010 P56037 INFO ************ Epoch=3 end ************
-2022-06-01 15:23:10,668 P56037 INFO [Metrics] AUC: 0.808247 - logloss: 0.443340
-2022-06-01 15:23:10,669 P56037 INFO Save best model: monitor(max): 0.808247
-2022-06-01 15:23:10,780 P56037 INFO --- 8058/8058 batches finished ---
-2022-06-01 15:23:10,817 P56037 INFO Train loss: 0.453657
-2022-06-01 15:23:10,817 P56037 INFO ************ Epoch=4 end ************
-2022-06-01 15:43:29,411 P56037 INFO [Metrics] AUC: 0.808612 - logloss: 0.442935
-2022-06-01 15:43:29,413 P56037 INFO Save best model: monitor(max): 0.808612
-2022-06-01 15:43:29,504 P56037 INFO --- 8058/8058 batches finished ---
-2022-06-01 15:43:29,540 P56037 INFO Train loss: 0.453190
-2022-06-01 15:43:29,540 P56037 INFO ************ Epoch=5 end ************
-2022-06-01 16:03:45,367 P56037 INFO [Metrics] AUC: 0.808954 - logloss: 0.442598
-2022-06-01 16:03:45,368 P56037 INFO Save best model: monitor(max): 0.808954
-2022-06-01 16:03:45,468 P56037 INFO --- 8058/8058 batches finished ---
-2022-06-01 16:03:45,507 P56037 INFO Train loss: 0.452838
-2022-06-01 16:03:45,507 P56037 INFO ************ Epoch=6 end ************
-2022-06-01 16:24:03,460 P56037 INFO [Metrics] AUC: 0.809303 - logloss: 0.442277
-2022-06-01 16:24:03,461 P56037 INFO Save best model: monitor(max): 0.809303
-2022-06-01 16:24:03,559 P56037 INFO --- 8058/8058 batches finished ---
-2022-06-01 16:24:03,606 P56037 INFO Train loss: 0.452568
-2022-06-01 16:24:03,606 P56037 INFO ************ Epoch=7 end ************
-2022-06-01 16:44:21,524 P56037 INFO [Metrics] AUC: 0.809538 - logloss: 0.442141
-2022-06-01 16:44:21,525 P56037 INFO Save best model: monitor(max): 0.809538
-2022-06-01 16:44:21,625 P56037 INFO --- 8058/8058 batches finished ---
-2022-06-01 16:44:21,666 P56037 INFO Train loss: 0.452320
-2022-06-01 16:44:21,666 P56037 INFO ************ Epoch=8 end ************
-2022-06-01 17:04:42,446 P56037 INFO [Metrics] AUC: 0.809621 - logloss: 0.442050
-2022-06-01 17:04:42,447 P56037 INFO Save best model: monitor(max): 0.809621
-2022-06-01 17:04:42,547 P56037 INFO --- 8058/8058 batches finished ---
-2022-06-01 17:04:42,589 P56037 INFO Train loss: 0.452143
-2022-06-01 17:04:42,589 P56037 INFO ************ Epoch=9 end ************
-2022-06-01 17:25:04,632 P56037 INFO [Metrics] AUC: 0.809706 - logloss: 0.441934
-2022-06-01 17:25:04,633 P56037 INFO Save best model: monitor(max): 0.809706
-2022-06-01 17:25:04,729 P56037 INFO --- 8058/8058 batches finished ---
-2022-06-01 17:25:04,774 P56037 INFO Train loss: 0.451989
-2022-06-01 17:25:04,774 P56037 INFO ************ Epoch=10 end ************
-2022-06-01 17:45:22,004 P56037 INFO [Metrics] AUC: 0.809965 - logloss: 0.441689
-2022-06-01 17:45:22,006 P56037 INFO Save best model: monitor(max): 0.809965
-2022-06-01 17:45:22,099 P56037 INFO --- 8058/8058 batches finished ---
-2022-06-01 17:45:22,144 P56037 INFO Train loss: 0.451828
-2022-06-01 17:45:22,144 P56037 INFO ************ Epoch=11 end ************
-2022-06-01 18:05:44,276 P56037 INFO [Metrics] AUC: 0.810023 - logloss: 0.441629
-2022-06-01 18:05:44,277 P56037 INFO Save best model: monitor(max): 0.810023
-2022-06-01 18:05:44,367 P56037 INFO --- 8058/8058 batches finished ---
-2022-06-01 18:05:44,409 P56037 INFO Train loss: 0.451707
-2022-06-01 18:05:44,410 P56037 INFO ************ Epoch=12 end ************
-2022-06-01 18:25:57,765 P56037 INFO [Metrics] AUC: 0.809893 - logloss: 0.441754
-2022-06-01 18:25:57,767 P56037 INFO Monitor(max) STOP: 0.809893 !
-2022-06-01 18:25:57,767 P56037 INFO Reduce learning rate on plateau: 0.000100
-2022-06-01 18:25:57,767 P56037 INFO --- 8058/8058 batches finished ---
-2022-06-01 18:25:57,813 P56037 INFO Train loss: 0.451602
-2022-06-01 18:25:57,814 P56037 INFO ************ Epoch=13 end ************
-2022-06-01 18:46:18,280 P56037 INFO [Metrics] AUC: 0.813067 - logloss: 0.438869
-2022-06-01 18:46:18,281 P56037 INFO Save best model: monitor(max): 0.813067
-2022-06-01 18:46:18,375 P56037 INFO --- 8058/8058 batches finished ---
-2022-06-01 18:46:18,423 P56037 INFO Train loss: 0.441052
-2022-06-01 18:46:18,423 P56037 INFO ************ Epoch=14 end ************
-2022-06-01 19:06:37,259 P56037 INFO [Metrics] AUC: 0.813573 - logloss: 0.438453
-2022-06-01 19:06:37,260 P56037 INFO Save best model: monitor(max): 0.813573
-2022-06-01 19:06:37,359 P56037 INFO --- 8058/8058 batches finished ---
-2022-06-01 19:06:37,406 P56037 INFO Train loss: 0.437117
-2022-06-01 19:06:37,406 P56037 INFO ************ Epoch=15 end ************
-2022-06-01 19:26:55,152 P56037 INFO [Metrics] AUC: 0.813653 - logloss: 0.438349
-2022-06-01 19:26:55,154 P56037 INFO Save best model: monitor(max): 0.813653
-2022-06-01 19:26:55,266 P56037 INFO --- 8058/8058 batches finished ---
-2022-06-01 19:26:55,316 P56037 INFO Train loss: 0.435369
-2022-06-01 19:26:55,316 P56037 INFO ************ Epoch=16 end ************
-2022-06-01 19:47:12,665 P56037 INFO [Metrics] AUC: 0.813704 - logloss: 0.438395
-2022-06-01 19:47:12,666 P56037 INFO Save best model: monitor(max): 0.813704
-2022-06-01 19:47:12,763 P56037 INFO --- 8058/8058 batches finished ---
-2022-06-01 19:47:12,803 P56037 INFO Train loss: 0.434067
-2022-06-01 19:47:12,804 P56037 INFO ************ Epoch=17 end ************
-2022-06-01 20:07:27,152 P56037 INFO [Metrics] AUC: 0.813549 - logloss: 0.438590
-2022-06-01 20:07:27,153 P56037 INFO Monitor(max) STOP: 0.813549 !
-2022-06-01 20:07:27,153 P56037 INFO Reduce learning rate on plateau: 0.000010
-2022-06-01 20:07:27,153 P56037 INFO --- 8058/8058 batches finished ---
-2022-06-01 20:07:27,196 P56037 INFO Train loss: 0.432977
-2022-06-01 20:07:27,196 P56037 INFO ************ Epoch=18 end ************
-2022-06-01 20:27:40,295 P56037 INFO [Metrics] AUC: 0.813055 - logloss: 0.439342
-2022-06-01 20:27:40,296 P56037 INFO Monitor(max) STOP: 0.813055 !
-2022-06-01 20:27:40,296 P56037 INFO Reduce learning rate on plateau: 0.000001
-2022-06-01 20:27:40,296 P56037 INFO Early stopping at epoch=19
-2022-06-01 20:27:40,296 P56037 INFO --- 8058/8058 batches finished ---
-2022-06-01 20:27:40,335 P56037 INFO Train loss: 0.428634
-2022-06-01 20:27:40,335 P56037 INFO Training finished.
-2022-06-01 20:27:40,335 P56037 INFO Load best model: /cache/FuxiCTR/benchmarks/Criteo/AOANet_criteo_x1/criteo_x1_7b681156/AOANet_criteo_x1_005_faa15d7f.model
-2022-06-01 20:27:45,030 P56037 INFO ****** Validation evaluation ******
-2022-06-01 20:28:58,807 P56037 INFO [Metrics] AUC: 0.813704 - logloss: 0.438395
-2022-06-01 20:28:58,887 P56037 INFO ******** Test evaluation ********
-2022-06-01 20:28:58,887 P56037 INFO Loading data...
-2022-06-01 20:28:58,888 P56037 INFO Loading data from h5: ../data/Criteo/criteo_x1_7b681156/test.h5
-2022-06-01 20:28:59,677 P56037 INFO Test samples: total/4587167, pos/1174769, neg/3412398, ratio/25.61%, blocks/1
-2022-06-01 20:28:59,677 P56037 INFO Loading test data done.
-2022-06-01 20:29:40,641 P56037 INFO [Metrics] AUC: 0.814127 - logloss: 0.437825
-
-```
+## AOANet_criteo_x1
+
+A hands-on guide to run the AOANet model on the Criteo_x1 dataset.
+
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
+
+### Index
+[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
+
+### Environments
++ Hardware
+
+ ```python
+ CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
+ GPU: Tesla V100 32G
+ RAM: 755G
+
+ ```
+
++ Software
+
+ ```python
+ CUDA: 10.2
+ python: 3.6.4
+ pytorch: 1.0.0
+ pandas: 0.22.0
+ numpy: 1.19.2
+ scipy: 1.5.4
+ sklearn: 0.22.1
+ pyyaml: 5.4.1
+ h5py: 2.8.0
+ tqdm: 4.60.0
+ fuxictr: 1.2.1
+
+ ```
+
+### Dataset
+Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo#Criteo_x1). Please refer to the dataset details to get data ready.
+
+### Code
+
+We use [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [AOANet](https://github.com/reczoo/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/AOANet.py).
+
+Running steps:
+
+1. Download [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+
+ ```python
+ sys.path.append('YOUR_PATH_TO_FuxiCTR/')
+ ```
+
+2. Create a data directory and put the downloaded csv files in `../data/Criteo/Criteo_x1`.
+
+3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [AOANet_criteo_x1_tuner_config_03](./AOANet_criteo_x1_tuner_config_03). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
+
+4. Run the following script to start.
+
+ ```bash
+ cd AOANet_criteo_x1
+ nohup python run_expid.py --config ./AOANet_criteo_x1_tuner_config_03 --expid AOANet_criteo_x1_005_faa15d7f --gpu 0 > run.log &
+ tail -f run.log
+ ```
+
+### Results
+
+| AUC | logloss |
+|:--------------------:|:--------------------:|
+| 0.814127 | 0.437825 |
+
+
+### Logs
+```python
+2022-06-01 14:01:59,484 P56037 INFO {
+ "batch_norm": "True",
+ "batch_size": "4096",
+ "data_format": "csv",
+ "data_root": "../data/Criteo/",
+ "dataset_id": "criteo_x1_7b681156",
+ "debug": "False",
+ "dnn_hidden_activations": "ReLU",
+ "dnn_hidden_units": "[400, 400, 400]",
+ "embedding_dim": "10",
+ "embedding_regularizer": "1e-05",
+ "epochs": "100",
+ "every_x_epochs": "1",
+ "feature_cols": "[{'active': True, 'dtype': 'float', 'name': ['I1', 'I2', 'I3', 'I4', 'I5', 'I6', 'I7', 'I8', 'I9', 'I10', 'I11', 'I12', 'I13'], 'type': 'numeric'}, {'active': True, 'dtype': 'float', 'name': ['C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10', 'C11', 'C12', 'C13', 'C14', 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21', 'C22', 'C23', 'C24', 'C25', 'C26'], 'type': 'categorical'}]",
+ "gpu": "4",
+ "label_col": "{'dtype': 'float', 'name': 'label'}",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "metrics": "['AUC', 'logloss']",
+ "min_categr_count": "1",
+ "model": "AOANet",
+ "model_id": "AOANet_criteo_x1_005_faa15d7f",
+ "model_root": "./Criteo/AOANet_criteo_x1/",
+ "monitor": "AUC",
+ "monitor_mode": "max",
+ "net_dropout": "0.1",
+ "net_regularizer": "0",
+ "num_interaction_layers": "1",
+ "num_subspaces": "8",
+ "num_workers": "3",
+ "optimizer": "adam",
+ "patience": "2",
+ "pickle_feature_encoder": "True",
+ "save_best_only": "True",
+ "seed": "2021",
+ "shuffle": "True",
+ "task": "binary_classification",
+ "test_data": "../data/Criteo/Criteo_x1/test.csv",
+ "train_data": "../data/Criteo/Criteo_x1/train.csv",
+ "use_hdf5": "True",
+ "valid_data": "../data/Criteo/Criteo_x1/valid.csv",
+ "verbose": "0",
+ "version": "pytorch"
+}
+2022-06-01 14:01:59,484 P56037 INFO Set up feature encoder...
+2022-06-01 14:01:59,484 P56037 INFO Load feature_map from json: ../data/Criteo/criteo_x1_7b681156/feature_map.json
+2022-06-01 14:01:59,485 P56037 INFO Loading data...
+2022-06-01 14:01:59,485 P56037 INFO Loading data from h5: ../data/Criteo/criteo_x1_7b681156/train.h5
+2022-06-01 14:02:04,219 P56037 INFO Loading data from h5: ../data/Criteo/criteo_x1_7b681156/valid.h5
+2022-06-01 14:02:05,389 P56037 INFO Train samples: total/33003326, pos/8456369, neg/24546957, ratio/25.62%, blocks/1
+2022-06-01 14:02:05,389 P56037 INFO Validation samples: total/8250124, pos/2114300, neg/6135824, ratio/25.63%, blocks/1
+2022-06-01 14:02:05,389 P56037 INFO Loading train data done.
+2022-06-01 14:02:09,801 P56037 INFO Total number of parameters: 21356289.
+2022-06-01 14:02:09,802 P56037 INFO Start training: 8058 batches/epoch
+2022-06-01 14:02:09,802 P56037 INFO ************ Epoch=1 start ************
+2022-06-01 14:22:25,613 P56037 INFO [Metrics] AUC: 0.804139 - logloss: 0.447060
+2022-06-01 14:22:25,615 P56037 INFO Save best model: monitor(max): 0.804139
+2022-06-01 14:22:25,859 P56037 INFO --- 8058/8058 batches finished ---
+2022-06-01 14:22:25,898 P56037 INFO Train loss: 0.461745
+2022-06-01 14:22:25,899 P56037 INFO ************ Epoch=1 end ************
+2022-06-01 14:42:40,739 P56037 INFO [Metrics] AUC: 0.806324 - logloss: 0.445000
+2022-06-01 14:42:40,740 P56037 INFO Save best model: monitor(max): 0.806324
+2022-06-01 14:42:40,852 P56037 INFO --- 8058/8058 batches finished ---
+2022-06-01 14:42:40,897 P56037 INFO Train loss: 0.455928
+2022-06-01 14:42:40,897 P56037 INFO ************ Epoch=2 end ************
+2022-06-01 15:02:54,868 P56037 INFO [Metrics] AUC: 0.807531 - logloss: 0.443970
+2022-06-01 15:02:54,869 P56037 INFO Save best model: monitor(max): 0.807531
+2022-06-01 15:02:54,967 P56037 INFO --- 8058/8058 batches finished ---
+2022-06-01 15:02:55,010 P56037 INFO Train loss: 0.454417
+2022-06-01 15:02:55,010 P56037 INFO ************ Epoch=3 end ************
+2022-06-01 15:23:10,668 P56037 INFO [Metrics] AUC: 0.808247 - logloss: 0.443340
+2022-06-01 15:23:10,669 P56037 INFO Save best model: monitor(max): 0.808247
+2022-06-01 15:23:10,780 P56037 INFO --- 8058/8058 batches finished ---
+2022-06-01 15:23:10,817 P56037 INFO Train loss: 0.453657
+2022-06-01 15:23:10,817 P56037 INFO ************ Epoch=4 end ************
+2022-06-01 15:43:29,411 P56037 INFO [Metrics] AUC: 0.808612 - logloss: 0.442935
+2022-06-01 15:43:29,413 P56037 INFO Save best model: monitor(max): 0.808612
+2022-06-01 15:43:29,504 P56037 INFO --- 8058/8058 batches finished ---
+2022-06-01 15:43:29,540 P56037 INFO Train loss: 0.453190
+2022-06-01 15:43:29,540 P56037 INFO ************ Epoch=5 end ************
+2022-06-01 16:03:45,367 P56037 INFO [Metrics] AUC: 0.808954 - logloss: 0.442598
+2022-06-01 16:03:45,368 P56037 INFO Save best model: monitor(max): 0.808954
+2022-06-01 16:03:45,468 P56037 INFO --- 8058/8058 batches finished ---
+2022-06-01 16:03:45,507 P56037 INFO Train loss: 0.452838
+2022-06-01 16:03:45,507 P56037 INFO ************ Epoch=6 end ************
+2022-06-01 16:24:03,460 P56037 INFO [Metrics] AUC: 0.809303 - logloss: 0.442277
+2022-06-01 16:24:03,461 P56037 INFO Save best model: monitor(max): 0.809303
+2022-06-01 16:24:03,559 P56037 INFO --- 8058/8058 batches finished ---
+2022-06-01 16:24:03,606 P56037 INFO Train loss: 0.452568
+2022-06-01 16:24:03,606 P56037 INFO ************ Epoch=7 end ************
+2022-06-01 16:44:21,524 P56037 INFO [Metrics] AUC: 0.809538 - logloss: 0.442141
+2022-06-01 16:44:21,525 P56037 INFO Save best model: monitor(max): 0.809538
+2022-06-01 16:44:21,625 P56037 INFO --- 8058/8058 batches finished ---
+2022-06-01 16:44:21,666 P56037 INFO Train loss: 0.452320
+2022-06-01 16:44:21,666 P56037 INFO ************ Epoch=8 end ************
+2022-06-01 17:04:42,446 P56037 INFO [Metrics] AUC: 0.809621 - logloss: 0.442050
+2022-06-01 17:04:42,447 P56037 INFO Save best model: monitor(max): 0.809621
+2022-06-01 17:04:42,547 P56037 INFO --- 8058/8058 batches finished ---
+2022-06-01 17:04:42,589 P56037 INFO Train loss: 0.452143
+2022-06-01 17:04:42,589 P56037 INFO ************ Epoch=9 end ************
+2022-06-01 17:25:04,632 P56037 INFO [Metrics] AUC: 0.809706 - logloss: 0.441934
+2022-06-01 17:25:04,633 P56037 INFO Save best model: monitor(max): 0.809706
+2022-06-01 17:25:04,729 P56037 INFO --- 8058/8058 batches finished ---
+2022-06-01 17:25:04,774 P56037 INFO Train loss: 0.451989
+2022-06-01 17:25:04,774 P56037 INFO ************ Epoch=10 end ************
+2022-06-01 17:45:22,004 P56037 INFO [Metrics] AUC: 0.809965 - logloss: 0.441689
+2022-06-01 17:45:22,006 P56037 INFO Save best model: monitor(max): 0.809965
+2022-06-01 17:45:22,099 P56037 INFO --- 8058/8058 batches finished ---
+2022-06-01 17:45:22,144 P56037 INFO Train loss: 0.451828
+2022-06-01 17:45:22,144 P56037 INFO ************ Epoch=11 end ************
+2022-06-01 18:05:44,276 P56037 INFO [Metrics] AUC: 0.810023 - logloss: 0.441629
+2022-06-01 18:05:44,277 P56037 INFO Save best model: monitor(max): 0.810023
+2022-06-01 18:05:44,367 P56037 INFO --- 8058/8058 batches finished ---
+2022-06-01 18:05:44,409 P56037 INFO Train loss: 0.451707
+2022-06-01 18:05:44,410 P56037 INFO ************ Epoch=12 end ************
+2022-06-01 18:25:57,765 P56037 INFO [Metrics] AUC: 0.809893 - logloss: 0.441754
+2022-06-01 18:25:57,767 P56037 INFO Monitor(max) STOP: 0.809893 !
+2022-06-01 18:25:57,767 P56037 INFO Reduce learning rate on plateau: 0.000100
+2022-06-01 18:25:57,767 P56037 INFO --- 8058/8058 batches finished ---
+2022-06-01 18:25:57,813 P56037 INFO Train loss: 0.451602
+2022-06-01 18:25:57,814 P56037 INFO ************ Epoch=13 end ************
+2022-06-01 18:46:18,280 P56037 INFO [Metrics] AUC: 0.813067 - logloss: 0.438869
+2022-06-01 18:46:18,281 P56037 INFO Save best model: monitor(max): 0.813067
+2022-06-01 18:46:18,375 P56037 INFO --- 8058/8058 batches finished ---
+2022-06-01 18:46:18,423 P56037 INFO Train loss: 0.441052
+2022-06-01 18:46:18,423 P56037 INFO ************ Epoch=14 end ************
+2022-06-01 19:06:37,259 P56037 INFO [Metrics] AUC: 0.813573 - logloss: 0.438453
+2022-06-01 19:06:37,260 P56037 INFO Save best model: monitor(max): 0.813573
+2022-06-01 19:06:37,359 P56037 INFO --- 8058/8058 batches finished ---
+2022-06-01 19:06:37,406 P56037 INFO Train loss: 0.437117
+2022-06-01 19:06:37,406 P56037 INFO ************ Epoch=15 end ************
+2022-06-01 19:26:55,152 P56037 INFO [Metrics] AUC: 0.813653 - logloss: 0.438349
+2022-06-01 19:26:55,154 P56037 INFO Save best model: monitor(max): 0.813653
+2022-06-01 19:26:55,266 P56037 INFO --- 8058/8058 batches finished ---
+2022-06-01 19:26:55,316 P56037 INFO Train loss: 0.435369
+2022-06-01 19:26:55,316 P56037 INFO ************ Epoch=16 end ************
+2022-06-01 19:47:12,665 P56037 INFO [Metrics] AUC: 0.813704 - logloss: 0.438395
+2022-06-01 19:47:12,666 P56037 INFO Save best model: monitor(max): 0.813704
+2022-06-01 19:47:12,763 P56037 INFO --- 8058/8058 batches finished ---
+2022-06-01 19:47:12,803 P56037 INFO Train loss: 0.434067
+2022-06-01 19:47:12,804 P56037 INFO ************ Epoch=17 end ************
+2022-06-01 20:07:27,152 P56037 INFO [Metrics] AUC: 0.813549 - logloss: 0.438590
+2022-06-01 20:07:27,153 P56037 INFO Monitor(max) STOP: 0.813549 !
+2022-06-01 20:07:27,153 P56037 INFO Reduce learning rate on plateau: 0.000010
+2022-06-01 20:07:27,153 P56037 INFO --- 8058/8058 batches finished ---
+2022-06-01 20:07:27,196 P56037 INFO Train loss: 0.432977
+2022-06-01 20:07:27,196 P56037 INFO ************ Epoch=18 end ************
+2022-06-01 20:27:40,295 P56037 INFO [Metrics] AUC: 0.813055 - logloss: 0.439342
+2022-06-01 20:27:40,296 P56037 INFO Monitor(max) STOP: 0.813055 !
+2022-06-01 20:27:40,296 P56037 INFO Reduce learning rate on plateau: 0.000001
+2022-06-01 20:27:40,296 P56037 INFO Early stopping at epoch=19
+2022-06-01 20:27:40,296 P56037 INFO --- 8058/8058 batches finished ---
+2022-06-01 20:27:40,335 P56037 INFO Train loss: 0.428634
+2022-06-01 20:27:40,335 P56037 INFO Training finished.
+2022-06-01 20:27:40,335 P56037 INFO Load best model: /cache/FuxiCTR/benchmarks/Criteo/AOANet_criteo_x1/criteo_x1_7b681156/AOANet_criteo_x1_005_faa15d7f.model
+2022-06-01 20:27:45,030 P56037 INFO ****** Validation evaluation ******
+2022-06-01 20:28:58,807 P56037 INFO [Metrics] AUC: 0.813704 - logloss: 0.438395
+2022-06-01 20:28:58,887 P56037 INFO ******** Test evaluation ********
+2022-06-01 20:28:58,887 P56037 INFO Loading data...
+2022-06-01 20:28:58,888 P56037 INFO Loading data from h5: ../data/Criteo/criteo_x1_7b681156/test.h5
+2022-06-01 20:28:59,677 P56037 INFO Test samples: total/4587167, pos/1174769, neg/3412398, ratio/25.61%, blocks/1
+2022-06-01 20:28:59,677 P56037 INFO Loading test data done.
+2022-06-01 20:29:40,641 P56037 INFO [Metrics] AUC: 0.814127 - logloss: 0.437825
+
+```
diff --git a/ranking/ctr/AOANet/AOANet_frappe_x1/README.md b/ranking/ctr/AOANet/AOANet_frappe_x1/README.md
index fb122656..af41ca7b 100644
--- a/ranking/ctr/AOANet/AOANet_frappe_x1/README.md
+++ b/ranking/ctr/AOANet/AOANet_frappe_x1/README.md
@@ -1,216 +1,216 @@
-## AOANet_frappe_x1
-
-A hands-on guide to run the AOANet model on the Frappe_x1 dataset.
-
-Author: [XUEPAI](https://github.com/xue-pai)
-
-### Index
-[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
-
-### Environments
-+ Hardware
-
- ```python
- CPU: Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.6GHz
- GPU: Tesla P100 16G
- RAM: 755G
-
- ```
-
-+ Software
-
- ```python
- CUDA: 11.4
- python: 3.6.5
- pytorch: 1.0.1.post2
- pandas: 0.23.0
- numpy: 1.18.1
- scipy: 1.1.0
- sklearn: 0.23.1
- pyyaml: 5.1
- h5py: 2.7.1
- tqdm: 4.59.0
- fuxictr: 1.2.1
- ```
-
-### Dataset
-Dataset ID: [Frappe_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Frappe#Frappe_x1). Please refer to the dataset details to get data ready.
-
-### Code
-
-We use [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [AOANet](https://github.com/xue-pai/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/AOANet.py).
-
-Running steps:
-
-1. Download [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
-
- ```python
- sys.path.append('YOUR_PATH_TO_FuxiCTR/')
- ```
-
-2. Create a data directory and put the downloaded csv files in `../data/Frappe/Frappe_x1`.
-
-3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [AOANet_frappe_x1_tuner_config_02](./AOANet_frappe_x1_tuner_config_02). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
-
-4. Run the following script to start.
-
- ```bash
- cd AOANet_frappe_x1
- nohup python run_expid.py --config ./AOANet_frappe_x1_tuner_config_02 --expid AOANet_frappe_x1_009_29c57772 --gpu 0 > run.log &
- tail -f run.log
- ```
-
-### Results
-
-| AUC | logloss |
-|:--------------------:|:--------------------:|
-| 0.984400 | 0.142379 |
-
-
-### Logs
-```python
-2022-04-13 00:36:26,508 P32306 INFO {
- "batch_norm": "True",
- "batch_size": "4096",
- "data_format": "csv",
- "data_root": "../data/Frappe/",
- "dataset_id": "frappe_x1_04e961e9",
- "debug": "False",
- "dnn_hidden_activations": "ReLU",
- "dnn_hidden_units": "[400, 400, 400]",
- "embedding_dim": "10",
- "embedding_regularizer": "0.1",
- "epochs": "100",
- "every_x_epochs": "1",
- "feature_cols": "[{'active': True, 'dtype': 'float', 'name': ['user', 'item', 'daytime', 'weekday', 'isweekend', 'homework', 'cost', 'weather', 'country', 'city'], 'type': 'categorical'}]",
- "gpu": "0",
- "label_col": "{'dtype': 'float', 'name': 'label'}",
- "learning_rate": "0.001",
- "loss": "binary_crossentropy",
- "metrics": "['AUC', 'logloss']",
- "min_categr_count": "1",
- "model": "AOANet",
- "model_id": "AOANet_frappe_x1_009_29c57772",
- "model_root": "./Frappe/AOANet_frappe_x1/",
- "monitor": "AUC",
- "monitor_mode": "max",
- "net_dropout": "0.3",
- "net_regularizer": "0",
- "num_interaction_layers": "1",
- "num_subspaces": "2",
- "num_workers": "3",
- "optimizer": "adam",
- "patience": "2",
- "pickle_feature_encoder": "True",
- "save_best_only": "True",
- "seed": "2021",
- "shuffle": "True",
- "task": "binary_classification",
- "test_data": "../data/Frappe/Frappe_x1/test.csv",
- "train_data": "../data/Frappe/Frappe_x1/train.csv",
- "use_hdf5": "True",
- "valid_data": "../data/Frappe/Frappe_x1/valid.csv",
- "verbose": "1",
- "version": "pytorch"
-}
-2022-04-13 00:36:26,509 P32306 INFO Set up feature encoder...
-2022-04-13 00:36:26,509 P32306 INFO Load feature_map from json: ../data/Frappe/frappe_x1_04e961e9/feature_map.json
-2022-04-13 00:36:26,510 P32306 INFO Loading data...
-2022-04-13 00:36:26,513 P32306 INFO Loading data from h5: ../data/Frappe/frappe_x1_04e961e9/train.h5
-2022-04-13 00:36:26,525 P32306 INFO Loading data from h5: ../data/Frappe/frappe_x1_04e961e9/valid.h5
-2022-04-13 00:36:26,530 P32306 INFO Train samples: total/202027, pos/67604, neg/134423, ratio/33.46%, blocks/1
-2022-04-13 00:36:26,530 P32306 INFO Validation samples: total/57722, pos/19063, neg/38659, ratio/33.03%, blocks/1
-2022-04-13 00:36:26,530 P32306 INFO Loading train data done.
-2022-04-13 00:36:30,988 P32306 INFO Total number of parameters: 418331.
-2022-04-13 00:36:30,989 P32306 INFO Start training: 50 batches/epoch
-2022-04-13 00:36:30,989 P32306 INFO ************ Epoch=1 start ************
-2022-04-13 00:36:39,671 P32306 INFO [Metrics] AUC: 0.936314 - logloss: 0.727013
-2022-04-13 00:36:39,672 P32306 INFO Save best model: monitor(max): 0.936314
-2022-04-13 00:36:39,676 P32306 INFO --- 50/50 batches finished ---
-2022-04-13 00:36:39,712 P32306 INFO Train loss: 0.394749
-2022-04-13 00:36:39,712 P32306 INFO ************ Epoch=1 end ************
-2022-04-13 00:36:48,472 P32306 INFO [Metrics] AUC: 0.954446 - logloss: 0.268002
-2022-04-13 00:36:48,473 P32306 INFO Save best model: monitor(max): 0.954446
-2022-04-13 00:36:48,479 P32306 INFO --- 50/50 batches finished ---
-2022-04-13 00:36:48,520 P32306 INFO Train loss: 0.293335
-2022-04-13 00:36:48,520 P32306 INFO ************ Epoch=2 end ************
-2022-04-13 00:36:55,561 P32306 INFO [Metrics] AUC: 0.966939 - logloss: 0.216872
-2022-04-13 00:36:55,562 P32306 INFO Save best model: monitor(max): 0.966939
-2022-04-13 00:36:55,566 P32306 INFO --- 50/50 batches finished ---
-2022-04-13 00:36:55,602 P32306 INFO Train loss: 0.262797
-2022-04-13 00:36:55,602 P32306 INFO ************ Epoch=3 end ************
-2022-04-13 00:37:02,344 P32306 INFO [Metrics] AUC: 0.972106 - logloss: 0.190556
-2022-04-13 00:37:02,345 P32306 INFO Save best model: monitor(max): 0.972106
-2022-04-13 00:37:02,351 P32306 INFO --- 50/50 batches finished ---
-2022-04-13 00:37:02,389 P32306 INFO Train loss: 0.246463
-2022-04-13 00:37:02,389 P32306 INFO ************ Epoch=4 end ************
-2022-04-13 00:37:11,055 P32306 INFO [Metrics] AUC: 0.973515 - logloss: 0.188421
-2022-04-13 00:37:11,055 P32306 INFO Save best model: monitor(max): 0.973515
-2022-04-13 00:37:11,061 P32306 INFO --- 50/50 batches finished ---
-2022-04-13 00:37:11,115 P32306 INFO Train loss: 0.237570
-2022-04-13 00:37:11,116 P32306 INFO ************ Epoch=5 end ************
-2022-04-13 00:37:19,571 P32306 INFO [Metrics] AUC: 0.974107 - logloss: 0.228146
-2022-04-13 00:37:19,572 P32306 INFO Save best model: monitor(max): 0.974107
-2022-04-13 00:37:19,579 P32306 INFO --- 50/50 batches finished ---
-2022-04-13 00:37:19,623 P32306 INFO Train loss: 0.232030
-2022-04-13 00:37:19,623 P32306 INFO ************ Epoch=6 end ************
-2022-04-13 00:37:28,111 P32306 INFO [Metrics] AUC: 0.973322 - logloss: 0.190663
-2022-04-13 00:37:28,112 P32306 INFO Monitor(max) STOP: 0.973322 !
-2022-04-13 00:37:28,112 P32306 INFO Reduce learning rate on plateau: 0.000100
-2022-04-13 00:37:28,112 P32306 INFO --- 50/50 batches finished ---
-2022-04-13 00:37:28,165 P32306 INFO Train loss: 0.227103
-2022-04-13 00:37:28,165 P32306 INFO ************ Epoch=7 end ************
-2022-04-13 00:37:36,660 P32306 INFO [Metrics] AUC: 0.981090 - logloss: 0.150912
-2022-04-13 00:37:36,661 P32306 INFO Save best model: monitor(max): 0.981090
-2022-04-13 00:37:36,665 P32306 INFO --- 50/50 batches finished ---
-2022-04-13 00:37:36,711 P32306 INFO Train loss: 0.193946
-2022-04-13 00:37:36,711 P32306 INFO ************ Epoch=8 end ************
-2022-04-13 00:37:45,232 P32306 INFO [Metrics] AUC: 0.983051 - logloss: 0.143658
-2022-04-13 00:37:45,233 P32306 INFO Save best model: monitor(max): 0.983051
-2022-04-13 00:37:45,238 P32306 INFO --- 50/50 batches finished ---
-2022-04-13 00:37:45,296 P32306 INFO Train loss: 0.158726
-2022-04-13 00:37:45,296 P32306 INFO ************ Epoch=9 end ************
-2022-04-13 00:37:53,806 P32306 INFO [Metrics] AUC: 0.983878 - logloss: 0.142119
-2022-04-13 00:37:53,807 P32306 INFO Save best model: monitor(max): 0.983878
-2022-04-13 00:37:53,813 P32306 INFO --- 50/50 batches finished ---
-2022-04-13 00:37:53,867 P32306 INFO Train loss: 0.136406
-2022-04-13 00:37:53,867 P32306 INFO ************ Epoch=10 end ************
-2022-04-13 00:38:02,417 P32306 INFO [Metrics] AUC: 0.984196 - logloss: 0.142770
-2022-04-13 00:38:02,418 P32306 INFO Save best model: monitor(max): 0.984196
-2022-04-13 00:38:02,424 P32306 INFO --- 50/50 batches finished ---
-2022-04-13 00:38:02,465 P32306 INFO Train loss: 0.120680
-2022-04-13 00:38:02,465 P32306 INFO ************ Epoch=11 end ************
-2022-04-13 00:38:11,098 P32306 INFO [Metrics] AUC: 0.984486 - logloss: 0.142689
-2022-04-13 00:38:11,099 P32306 INFO Save best model: monitor(max): 0.984486
-2022-04-13 00:38:11,105 P32306 INFO --- 50/50 batches finished ---
-2022-04-13 00:38:11,153 P32306 INFO Train loss: 0.108975
-2022-04-13 00:38:11,153 P32306 INFO ************ Epoch=12 end ************
-2022-04-13 00:38:19,736 P32306 INFO [Metrics] AUC: 0.984972 - logloss: 0.142321
-2022-04-13 00:38:19,736 P32306 INFO Save best model: monitor(max): 0.984972
-2022-04-13 00:38:19,742 P32306 INFO --- 50/50 batches finished ---
-2022-04-13 00:38:19,792 P32306 INFO Train loss: 0.100400
-2022-04-13 00:38:19,792 P32306 INFO ************ Epoch=13 end ************
-2022-04-13 00:38:28,465 P32306 INFO [Metrics] AUC: 0.984567 - logloss: 0.146438
-2022-04-13 00:38:28,466 P32306 INFO Monitor(max) STOP: 0.984567 !
-2022-04-13 00:38:28,466 P32306 INFO Reduce learning rate on plateau: 0.000010
-2022-04-13 00:38:28,466 P32306 INFO --- 50/50 batches finished ---
-2022-04-13 00:38:28,503 P32306 INFO Train loss: 0.094982
-2022-04-13 00:38:28,503 P32306 INFO ************ Epoch=14 end ************
-2022-04-13 00:38:36,986 P32306 INFO [Metrics] AUC: 0.984691 - logloss: 0.146166
-2022-04-13 00:38:36,987 P32306 INFO Monitor(max) STOP: 0.984691 !
-2022-04-13 00:38:36,987 P32306 INFO Reduce learning rate on plateau: 0.000001
-2022-04-13 00:38:36,987 P32306 INFO Early stopping at epoch=15
-2022-04-13 00:38:36,987 P32306 INFO --- 50/50 batches finished ---
-2022-04-13 00:38:37,021 P32306 INFO Train loss: 0.085368
-2022-04-13 00:38:37,021 P32306 INFO Training finished.
-2022-04-13 00:38:37,021 P32306 INFO Load best model: /home/XXX/FuxiCTR/benchmarks/Frappe/AOANet_frappe_x1/frappe_x1_04e961e9/AOANet_frappe_x1_009_29c57772.model
-2022-04-13 00:38:37,074 P32306 INFO ****** Validation evaluation ******
-2022-04-13 00:38:37,550 P32306 INFO [Metrics] AUC: 0.984972 - logloss: 0.142321
-2022-04-13 00:38:37,583 P32306 INFO ******** Test evaluation ********
-2022-04-13 00:38:37,583 P32306 INFO Loading data...
-2022-04-13 00:38:37,584 P32306 INFO Loading data from h5: ../data/Frappe/frappe_x1_04e961e9/test.h5
-2022-04-13 00:38:37,587 P32306 INFO Test samples: total/28860, pos/9536, neg/19324, ratio/33.04%, blocks/1
-2022-04-13 00:38:37,587 P32306 INFO Loading test data done.
-2022-04-13 00:38:37,956 P32306 INFO [Metrics] AUC: 0.984400 - logloss: 0.142379
-
-```
+## AOANet_frappe_x1
+
+A hands-on guide to run the AOANet model on the Frappe_x1 dataset.
+
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
+
+### Index
+[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
+
+### Environments
++ Hardware
+
+ ```python
+ CPU: Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.6GHz
+ GPU: Tesla P100 16G
+ RAM: 755G
+
+ ```
+
++ Software
+
+ ```python
+ CUDA: 11.4
+ python: 3.6.5
+ pytorch: 1.0.1.post2
+ pandas: 0.23.0
+ numpy: 1.18.1
+ scipy: 1.1.0
+ sklearn: 0.23.1
+ pyyaml: 5.1
+ h5py: 2.7.1
+ tqdm: 4.59.0
+ fuxictr: 1.2.1
+ ```
+
+### Dataset
+Dataset ID: [Frappe_x1](https://github.com/reczoo/Datasets/tree/main/Frappe/Frappe_x1). Please refer to the dataset details to get data ready.
+
+### Code
+
+We use [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [AOANet](https://github.com/reczoo/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/AOANet.py).
+
+Running steps:
+
+1. Download [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+
+ ```python
+ sys.path.append('YOUR_PATH_TO_FuxiCTR/')
+ ```
+
+2. Create a data directory and put the downloaded csv files in `../data/Frappe/Frappe_x1`.
+
+3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [AOANet_frappe_x1_tuner_config_02](./AOANet_frappe_x1_tuner_config_02). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
+
+4. Run the following script to start.
+
+ ```bash
+ cd AOANet_frappe_x1
+ nohup python run_expid.py --config ./AOANet_frappe_x1_tuner_config_02 --expid AOANet_frappe_x1_009_29c57772 --gpu 0 > run.log &
+ tail -f run.log
+ ```
+
+### Results
+
+| AUC | logloss |
+|:--------------------:|:--------------------:|
+| 0.984400 | 0.142379 |
+
+
+### Logs
+```python
+2022-04-13 00:36:26,508 P32306 INFO {
+ "batch_norm": "True",
+ "batch_size": "4096",
+ "data_format": "csv",
+ "data_root": "../data/Frappe/",
+ "dataset_id": "frappe_x1_04e961e9",
+ "debug": "False",
+ "dnn_hidden_activations": "ReLU",
+ "dnn_hidden_units": "[400, 400, 400]",
+ "embedding_dim": "10",
+ "embedding_regularizer": "0.1",
+ "epochs": "100",
+ "every_x_epochs": "1",
+ "feature_cols": "[{'active': True, 'dtype': 'float', 'name': ['user', 'item', 'daytime', 'weekday', 'isweekend', 'homework', 'cost', 'weather', 'country', 'city'], 'type': 'categorical'}]",
+ "gpu": "0",
+ "label_col": "{'dtype': 'float', 'name': 'label'}",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "metrics": "['AUC', 'logloss']",
+ "min_categr_count": "1",
+ "model": "AOANet",
+ "model_id": "AOANet_frappe_x1_009_29c57772",
+ "model_root": "./Frappe/AOANet_frappe_x1/",
+ "monitor": "AUC",
+ "monitor_mode": "max",
+ "net_dropout": "0.3",
+ "net_regularizer": "0",
+ "num_interaction_layers": "1",
+ "num_subspaces": "2",
+ "num_workers": "3",
+ "optimizer": "adam",
+ "patience": "2",
+ "pickle_feature_encoder": "True",
+ "save_best_only": "True",
+ "seed": "2021",
+ "shuffle": "True",
+ "task": "binary_classification",
+ "test_data": "../data/Frappe/Frappe_x1/test.csv",
+ "train_data": "../data/Frappe/Frappe_x1/train.csv",
+ "use_hdf5": "True",
+ "valid_data": "../data/Frappe/Frappe_x1/valid.csv",
+ "verbose": "1",
+ "version": "pytorch"
+}
+2022-04-13 00:36:26,509 P32306 INFO Set up feature encoder...
+2022-04-13 00:36:26,509 P32306 INFO Load feature_map from json: ../data/Frappe/frappe_x1_04e961e9/feature_map.json
+2022-04-13 00:36:26,510 P32306 INFO Loading data...
+2022-04-13 00:36:26,513 P32306 INFO Loading data from h5: ../data/Frappe/frappe_x1_04e961e9/train.h5
+2022-04-13 00:36:26,525 P32306 INFO Loading data from h5: ../data/Frappe/frappe_x1_04e961e9/valid.h5
+2022-04-13 00:36:26,530 P32306 INFO Train samples: total/202027, pos/67604, neg/134423, ratio/33.46%, blocks/1
+2022-04-13 00:36:26,530 P32306 INFO Validation samples: total/57722, pos/19063, neg/38659, ratio/33.03%, blocks/1
+2022-04-13 00:36:26,530 P32306 INFO Loading train data done.
+2022-04-13 00:36:30,988 P32306 INFO Total number of parameters: 418331.
+2022-04-13 00:36:30,989 P32306 INFO Start training: 50 batches/epoch
+2022-04-13 00:36:30,989 P32306 INFO ************ Epoch=1 start ************
+2022-04-13 00:36:39,671 P32306 INFO [Metrics] AUC: 0.936314 - logloss: 0.727013
+2022-04-13 00:36:39,672 P32306 INFO Save best model: monitor(max): 0.936314
+2022-04-13 00:36:39,676 P32306 INFO --- 50/50 batches finished ---
+2022-04-13 00:36:39,712 P32306 INFO Train loss: 0.394749
+2022-04-13 00:36:39,712 P32306 INFO ************ Epoch=1 end ************
+2022-04-13 00:36:48,472 P32306 INFO [Metrics] AUC: 0.954446 - logloss: 0.268002
+2022-04-13 00:36:48,473 P32306 INFO Save best model: monitor(max): 0.954446
+2022-04-13 00:36:48,479 P32306 INFO --- 50/50 batches finished ---
+2022-04-13 00:36:48,520 P32306 INFO Train loss: 0.293335
+2022-04-13 00:36:48,520 P32306 INFO ************ Epoch=2 end ************
+2022-04-13 00:36:55,561 P32306 INFO [Metrics] AUC: 0.966939 - logloss: 0.216872
+2022-04-13 00:36:55,562 P32306 INFO Save best model: monitor(max): 0.966939
+2022-04-13 00:36:55,566 P32306 INFO --- 50/50 batches finished ---
+2022-04-13 00:36:55,602 P32306 INFO Train loss: 0.262797
+2022-04-13 00:36:55,602 P32306 INFO ************ Epoch=3 end ************
+2022-04-13 00:37:02,344 P32306 INFO [Metrics] AUC: 0.972106 - logloss: 0.190556
+2022-04-13 00:37:02,345 P32306 INFO Save best model: monitor(max): 0.972106
+2022-04-13 00:37:02,351 P32306 INFO --- 50/50 batches finished ---
+2022-04-13 00:37:02,389 P32306 INFO Train loss: 0.246463
+2022-04-13 00:37:02,389 P32306 INFO ************ Epoch=4 end ************
+2022-04-13 00:37:11,055 P32306 INFO [Metrics] AUC: 0.973515 - logloss: 0.188421
+2022-04-13 00:37:11,055 P32306 INFO Save best model: monitor(max): 0.973515
+2022-04-13 00:37:11,061 P32306 INFO --- 50/50 batches finished ---
+2022-04-13 00:37:11,115 P32306 INFO Train loss: 0.237570
+2022-04-13 00:37:11,116 P32306 INFO ************ Epoch=5 end ************
+2022-04-13 00:37:19,571 P32306 INFO [Metrics] AUC: 0.974107 - logloss: 0.228146
+2022-04-13 00:37:19,572 P32306 INFO Save best model: monitor(max): 0.974107
+2022-04-13 00:37:19,579 P32306 INFO --- 50/50 batches finished ---
+2022-04-13 00:37:19,623 P32306 INFO Train loss: 0.232030
+2022-04-13 00:37:19,623 P32306 INFO ************ Epoch=6 end ************
+2022-04-13 00:37:28,111 P32306 INFO [Metrics] AUC: 0.973322 - logloss: 0.190663
+2022-04-13 00:37:28,112 P32306 INFO Monitor(max) STOP: 0.973322 !
+2022-04-13 00:37:28,112 P32306 INFO Reduce learning rate on plateau: 0.000100
+2022-04-13 00:37:28,112 P32306 INFO --- 50/50 batches finished ---
+2022-04-13 00:37:28,165 P32306 INFO Train loss: 0.227103
+2022-04-13 00:37:28,165 P32306 INFO ************ Epoch=7 end ************
+2022-04-13 00:37:36,660 P32306 INFO [Metrics] AUC: 0.981090 - logloss: 0.150912
+2022-04-13 00:37:36,661 P32306 INFO Save best model: monitor(max): 0.981090
+2022-04-13 00:37:36,665 P32306 INFO --- 50/50 batches finished ---
+2022-04-13 00:37:36,711 P32306 INFO Train loss: 0.193946
+2022-04-13 00:37:36,711 P32306 INFO ************ Epoch=8 end ************
+2022-04-13 00:37:45,232 P32306 INFO [Metrics] AUC: 0.983051 - logloss: 0.143658
+2022-04-13 00:37:45,233 P32306 INFO Save best model: monitor(max): 0.983051
+2022-04-13 00:37:45,238 P32306 INFO --- 50/50 batches finished ---
+2022-04-13 00:37:45,296 P32306 INFO Train loss: 0.158726
+2022-04-13 00:37:45,296 P32306 INFO ************ Epoch=9 end ************
+2022-04-13 00:37:53,806 P32306 INFO [Metrics] AUC: 0.983878 - logloss: 0.142119
+2022-04-13 00:37:53,807 P32306 INFO Save best model: monitor(max): 0.983878
+2022-04-13 00:37:53,813 P32306 INFO --- 50/50 batches finished ---
+2022-04-13 00:37:53,867 P32306 INFO Train loss: 0.136406
+2022-04-13 00:37:53,867 P32306 INFO ************ Epoch=10 end ************
+2022-04-13 00:38:02,417 P32306 INFO [Metrics] AUC: 0.984196 - logloss: 0.142770
+2022-04-13 00:38:02,418 P32306 INFO Save best model: monitor(max): 0.984196
+2022-04-13 00:38:02,424 P32306 INFO --- 50/50 batches finished ---
+2022-04-13 00:38:02,465 P32306 INFO Train loss: 0.120680
+2022-04-13 00:38:02,465 P32306 INFO ************ Epoch=11 end ************
+2022-04-13 00:38:11,098 P32306 INFO [Metrics] AUC: 0.984486 - logloss: 0.142689
+2022-04-13 00:38:11,099 P32306 INFO Save best model: monitor(max): 0.984486
+2022-04-13 00:38:11,105 P32306 INFO --- 50/50 batches finished ---
+2022-04-13 00:38:11,153 P32306 INFO Train loss: 0.108975
+2022-04-13 00:38:11,153 P32306 INFO ************ Epoch=12 end ************
+2022-04-13 00:38:19,736 P32306 INFO [Metrics] AUC: 0.984972 - logloss: 0.142321
+2022-04-13 00:38:19,736 P32306 INFO Save best model: monitor(max): 0.984972
+2022-04-13 00:38:19,742 P32306 INFO --- 50/50 batches finished ---
+2022-04-13 00:38:19,792 P32306 INFO Train loss: 0.100400
+2022-04-13 00:38:19,792 P32306 INFO ************ Epoch=13 end ************
+2022-04-13 00:38:28,465 P32306 INFO [Metrics] AUC: 0.984567 - logloss: 0.146438
+2022-04-13 00:38:28,466 P32306 INFO Monitor(max) STOP: 0.984567 !
+2022-04-13 00:38:28,466 P32306 INFO Reduce learning rate on plateau: 0.000010
+2022-04-13 00:38:28,466 P32306 INFO --- 50/50 batches finished ---
+2022-04-13 00:38:28,503 P32306 INFO Train loss: 0.094982
+2022-04-13 00:38:28,503 P32306 INFO ************ Epoch=14 end ************
+2022-04-13 00:38:36,986 P32306 INFO [Metrics] AUC: 0.984691 - logloss: 0.146166
+2022-04-13 00:38:36,987 P32306 INFO Monitor(max) STOP: 0.984691 !
+2022-04-13 00:38:36,987 P32306 INFO Reduce learning rate on plateau: 0.000001
+2022-04-13 00:38:36,987 P32306 INFO Early stopping at epoch=15
+2022-04-13 00:38:36,987 P32306 INFO --- 50/50 batches finished ---
+2022-04-13 00:38:37,021 P32306 INFO Train loss: 0.085368
+2022-04-13 00:38:37,021 P32306 INFO Training finished.
+2022-04-13 00:38:37,021 P32306 INFO Load best model: /home/XXX/FuxiCTR/benchmarks/Frappe/AOANet_frappe_x1/frappe_x1_04e961e9/AOANet_frappe_x1_009_29c57772.model
+2022-04-13 00:38:37,074 P32306 INFO ****** Validation evaluation ******
+2022-04-13 00:38:37,550 P32306 INFO [Metrics] AUC: 0.984972 - logloss: 0.142321
+2022-04-13 00:38:37,583 P32306 INFO ******** Test evaluation ********
+2022-04-13 00:38:37,583 P32306 INFO Loading data...
+2022-04-13 00:38:37,584 P32306 INFO Loading data from h5: ../data/Frappe/frappe_x1_04e961e9/test.h5
+2022-04-13 00:38:37,587 P32306 INFO Test samples: total/28860, pos/9536, neg/19324, ratio/33.04%, blocks/1
+2022-04-13 00:38:37,587 P32306 INFO Loading test data done.
+2022-04-13 00:38:37,956 P32306 INFO [Metrics] AUC: 0.984400 - logloss: 0.142379
+
+```
diff --git a/ranking/ctr/AOANet/AOANet_kuaivideo_x1/README.md b/ranking/ctr/AOANet/AOANet_kuaivideo_x1/README.md
index 4bb489f6..0a170971 100644
--- a/ranking/ctr/AOANet/AOANet_kuaivideo_x1/README.md
+++ b/ranking/ctr/AOANet/AOANet_kuaivideo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AOANet model on the KuaiVideo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [KuaiVideo_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/KuaiShou#KuaiVideo_x1) to get data ready.
+Please refer to [KuaiVideo_x1](https://github.com/reczoo/Datasets/tree/main/KuaiShou/KuaiVideo_x1) to get the dataset details.
### Code
-We use the [AOANet](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/AOANet) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [AOANet](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/AOANet) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/AOANet/AOANet_microvideo1.7m_x1/README.md b/ranking/ctr/AOANet/AOANet_microvideo1.7m_x1/README.md
index 4d941dc4..8041ebf4 100644
--- a/ranking/ctr/AOANet/AOANet_microvideo1.7m_x1/README.md
+++ b/ranking/ctr/AOANet/AOANet_microvideo1.7m_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AOANet model on the MicroVideo1.7M_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [MicroVideo1.7M_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/MicroVideo1.7M#MicroVideo17M_x1) to get data ready.
+Please refer to [MicroVideo1.7M_x1](https://github.com/reczoo/Datasets/tree/main/MicroVideo/MicroVideo1.7M_x1) to get the dataset details.
### Code
-We use the [AOANet](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/AOANet) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [AOANet](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/AOANet) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/AOANet/AOANet_movielenslatest_x1/README.md b/ranking/ctr/AOANet/AOANet_movielenslatest_x1/README.md
index fafe054c..671d9957 100644
--- a/ranking/ctr/AOANet/AOANet_movielenslatest_x1/README.md
+++ b/ranking/ctr/AOANet/AOANet_movielenslatest_x1/README.md
@@ -1,231 +1,231 @@
-## AOANet_movielenslatest_x1
-
-A hands-on guide to run the AOANet model on the MovielensLatest_x1 dataset.
-
-Author: [XUEPAI](https://github.com/xue-pai)
-
-### Index
-[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
-
-### Environments
-+ Hardware
-
- ```python
- CPU: Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.6GHz
- GPU: Tesla P100 16G
- RAM: 755G
-
- ```
-
-+ Software
-
- ```python
- CUDA: 11.4
- python: 3.6.5
- pytorch: 1.0.1.post2
- pandas: 0.23.0
- numpy: 1.18.1
- scipy: 1.1.0
- sklearn: 0.23.1
- pyyaml: 5.1
- h5py: 2.7.1
- tqdm: 4.59.0
- fuxictr: 1.2.1
- ```
-
-### Dataset
-Dataset ID: [MovielensLatest_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/MovieLens#MovielensLatest_x1). Please refer to the dataset details to get data ready.
-
-### Code
-
-We use [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [AOANet](https://github.com/xue-pai/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/AOANet.py).
-
-Running steps:
-
-1. Download [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
-
- ```python
- sys.path.append('YOUR_PATH_TO_FuxiCTR/')
- ```
-
-2. Create a data directory and put the downloaded csv files in `../data/Movielens/MovielensLatest_x1`.
-
-3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [AOANet_movielenslatest_x1_tuner_config_01](./AOANet_movielenslatest_x1_tuner_config_01). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
-
-4. Run the following script to start.
-
- ```bash
- cd AOANet_movielenslatest_x1
- nohup python run_expid.py --config ./AOANet_movielenslatest_x1_tuner_config_01 --expid AOANet_movielenslatest_x1_031_bbf8c17a --gpu 0 > run.log &
- tail -f run.log
- ```
-
-### Results
-
-| AUC | logloss |
-|:--------------------:|:--------------------:|
-| 0.969401 | 0.210458 |
-
-
-### Logs
-```python
-2022-05-31 18:46:05,745 P16920 INFO {
- "batch_norm": "True",
- "batch_size": "4096",
- "data_format": "csv",
- "data_root": "../data/Movielens/",
- "dataset_id": "movielenslatest_x1_cd32d937",
- "debug": "False",
- "dnn_hidden_activations": "ReLU",
- "dnn_hidden_units": "[400, 400, 400]",
- "embedding_dim": "10",
- "embedding_regularizer": "0.01",
- "epochs": "100",
- "every_x_epochs": "1",
- "feature_cols": "[{'active': True, 'dtype': 'float', 'name': ['user_id', 'item_id', 'tag_id'], 'type': 'categorical'}]",
- "gpu": "0",
- "label_col": "{'dtype': 'float', 'name': 'label'}",
- "learning_rate": "0.001",
- "loss": "binary_crossentropy",
- "metrics": "['AUC', 'logloss']",
- "min_categr_count": "1",
- "model": "AOANet",
- "model_id": "AOANet_movielenslatest_x1_031_bbf8c17a",
- "model_root": "./Movielens/AOANet_movielenslatest_x1/",
- "monitor": "AUC",
- "monitor_mode": "max",
- "net_dropout": "0.3",
- "net_regularizer": "0",
- "num_interaction_layers": "3",
- "num_subspaces": "2",
- "num_workers": "3",
- "optimizer": "adam",
- "patience": "2",
- "pickle_feature_encoder": "True",
- "save_best_only": "True",
- "seed": "2021",
- "shuffle": "True",
- "task": "binary_classification",
- "test_data": "../data/Movielens/MovielensLatest_x1/test.csv",
- "train_data": "../data/Movielens/MovielensLatest_x1/train.csv",
- "use_hdf5": "True",
- "valid_data": "../data/Movielens/MovielensLatest_x1/valid.csv",
- "verbose": "1",
- "version": "pytorch"
-}
-2022-05-31 18:46:05,746 P16920 INFO Set up feature encoder...
-2022-05-31 18:46:05,746 P16920 INFO Load feature_map from json: ../data/Movielens/movielenslatest_x1_cd32d937/feature_map.json
-2022-05-31 18:46:05,746 P16920 INFO Loading data...
-2022-05-31 18:46:05,749 P16920 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_cd32d937/train.h5
-2022-05-31 18:46:05,777 P16920 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_cd32d937/valid.h5
-2022-05-31 18:46:05,785 P16920 INFO Train samples: total/1404801, pos/467878, neg/936923, ratio/33.31%, blocks/1
-2022-05-31 18:46:05,785 P16920 INFO Validation samples: total/401372, pos/134225, neg/267147, ratio/33.44%, blocks/1
-2022-05-31 18:46:05,785 P16920 INFO Loading train data done.
-2022-05-31 18:46:09,635 P16920 INFO Total number of parameters: 1239113.
-2022-05-31 18:46:09,635 P16920 INFO Start training: 343 batches/epoch
-2022-05-31 18:46:09,636 P16920 INFO ************ Epoch=1 start ************
-2022-05-31 18:46:41,510 P16920 INFO [Metrics] AUC: 0.933901 - logloss: 0.292700
-2022-05-31 18:46:41,511 P16920 INFO Save best model: monitor(max): 0.933901
-2022-05-31 18:46:41,520 P16920 INFO --- 343/343 batches finished ---
-2022-05-31 18:46:41,559 P16920 INFO Train loss: 0.377147
-2022-05-31 18:46:41,559 P16920 INFO ************ Epoch=1 end ************
-2022-05-31 18:47:13,297 P16920 INFO [Metrics] AUC: 0.944238 - logloss: 0.272118
-2022-05-31 18:47:13,299 P16920 INFO Save best model: monitor(max): 0.944238
-2022-05-31 18:47:13,310 P16920 INFO --- 343/343 batches finished ---
-2022-05-31 18:47:13,361 P16920 INFO Train loss: 0.361309
-2022-05-31 18:47:13,361 P16920 INFO ************ Epoch=2 end ************
-2022-05-31 18:47:45,052 P16920 INFO [Metrics] AUC: 0.948313 - logloss: 0.261492
-2022-05-31 18:47:45,053 P16920 INFO Save best model: monitor(max): 0.948313
-2022-05-31 18:47:45,064 P16920 INFO --- 343/343 batches finished ---
-2022-05-31 18:47:45,112 P16920 INFO Train loss: 0.362418
-2022-05-31 18:47:45,112 P16920 INFO ************ Epoch=3 end ************
-2022-05-31 18:48:16,983 P16920 INFO [Metrics] AUC: 0.950069 - logloss: 0.250828
-2022-05-31 18:48:16,984 P16920 INFO Save best model: monitor(max): 0.950069
-2022-05-31 18:48:16,995 P16920 INFO --- 343/343 batches finished ---
-2022-05-31 18:48:17,037 P16920 INFO Train loss: 0.365230
-2022-05-31 18:48:17,037 P16920 INFO ************ Epoch=4 end ************
-2022-05-31 18:48:48,797 P16920 INFO [Metrics] AUC: 0.951998 - logloss: 0.246179
-2022-05-31 18:48:48,798 P16920 INFO Save best model: monitor(max): 0.951998
-2022-05-31 18:48:48,809 P16920 INFO --- 343/343 batches finished ---
-2022-05-31 18:48:48,882 P16920 INFO Train loss: 0.368801
-2022-05-31 18:48:48,882 P16920 INFO ************ Epoch=5 end ************
-2022-05-31 18:49:16,351 P16920 INFO [Metrics] AUC: 0.952251 - logloss: 0.244044
-2022-05-31 18:49:16,352 P16920 INFO Save best model: monitor(max): 0.952251
-2022-05-31 18:49:16,362 P16920 INFO --- 343/343 batches finished ---
-2022-05-31 18:49:16,412 P16920 INFO Train loss: 0.371365
-2022-05-31 18:49:16,412 P16920 INFO ************ Epoch=6 end ************
-2022-05-31 18:49:48,759 P16920 INFO [Metrics] AUC: 0.953469 - logloss: 0.241305
-2022-05-31 18:49:48,760 P16920 INFO Save best model: monitor(max): 0.953469
-2022-05-31 18:49:48,768 P16920 INFO --- 343/343 batches finished ---
-2022-05-31 18:49:48,823 P16920 INFO Train loss: 0.373331
-2022-05-31 18:49:48,823 P16920 INFO ************ Epoch=7 end ************
-2022-05-31 18:50:21,229 P16920 INFO [Metrics] AUC: 0.953871 - logloss: 0.240481
-2022-05-31 18:50:21,230 P16920 INFO Save best model: monitor(max): 0.953871
-2022-05-31 18:50:21,238 P16920 INFO --- 343/343 batches finished ---
-2022-05-31 18:50:21,292 P16920 INFO Train loss: 0.374384
-2022-05-31 18:50:21,292 P16920 INFO ************ Epoch=8 end ************
-2022-05-31 18:50:50,300 P16920 INFO [Metrics] AUC: 0.954370 - logloss: 0.240454
-2022-05-31 18:50:50,301 P16920 INFO Save best model: monitor(max): 0.954370
-2022-05-31 18:50:50,313 P16920 INFO --- 343/343 batches finished ---
-2022-05-31 18:50:50,372 P16920 INFO Train loss: 0.374569
-2022-05-31 18:50:50,372 P16920 INFO ************ Epoch=9 end ************
-2022-05-31 18:51:26,099 P16920 INFO [Metrics] AUC: 0.954567 - logloss: 0.239315
-2022-05-31 18:51:26,100 P16920 INFO Save best model: monitor(max): 0.954567
-2022-05-31 18:51:26,110 P16920 INFO --- 343/343 batches finished ---
-2022-05-31 18:51:26,148 P16920 INFO Train loss: 0.374840
-2022-05-31 18:51:26,148 P16920 INFO ************ Epoch=10 end ************
-2022-05-31 18:52:01,834 P16920 INFO [Metrics] AUC: 0.955103 - logloss: 0.238806
-2022-05-31 18:52:01,836 P16920 INFO Save best model: monitor(max): 0.955103
-2022-05-31 18:52:01,847 P16920 INFO --- 343/343 batches finished ---
-2022-05-31 18:52:01,921 P16920 INFO Train loss: 0.375471
-2022-05-31 18:52:01,922 P16920 INFO ************ Epoch=11 end ************
-2022-05-31 18:52:37,467 P16920 INFO [Metrics] AUC: 0.955896 - logloss: 0.235422
-2022-05-31 18:52:37,468 P16920 INFO Save best model: monitor(max): 0.955896
-2022-05-31 18:52:37,478 P16920 INFO --- 343/343 batches finished ---
-2022-05-31 18:52:37,519 P16920 INFO Train loss: 0.376710
-2022-05-31 18:52:37,519 P16920 INFO ************ Epoch=12 end ************
-2022-05-31 18:53:13,017 P16920 INFO [Metrics] AUC: 0.956460 - logloss: 0.236233
-2022-05-31 18:53:13,017 P16920 INFO Save best model: monitor(max): 0.956460
-2022-05-31 18:53:13,025 P16920 INFO --- 343/343 batches finished ---
-2022-05-31 18:53:13,080 P16920 INFO Train loss: 0.375659
-2022-05-31 18:53:13,080 P16920 INFO ************ Epoch=13 end ************
-2022-05-31 18:53:48,775 P16920 INFO [Metrics] AUC: 0.956391 - logloss: 0.232838
-2022-05-31 18:53:48,776 P16920 INFO Monitor(max) STOP: 0.956391 !
-2022-05-31 18:53:48,777 P16920 INFO Reduce learning rate on plateau: 0.000100
-2022-05-31 18:53:48,777 P16920 INFO --- 343/343 batches finished ---
-2022-05-31 18:53:48,819 P16920 INFO Train loss: 0.376075
-2022-05-31 18:53:48,819 P16920 INFO ************ Epoch=14 end ************
-2022-05-31 18:54:24,283 P16920 INFO [Metrics] AUC: 0.967931 - logloss: 0.203813
-2022-05-31 18:54:24,283 P16920 INFO Save best model: monitor(max): 0.967931
-2022-05-31 18:54:24,291 P16920 INFO --- 343/343 batches finished ---
-2022-05-31 18:54:24,341 P16920 INFO Train loss: 0.276614
-2022-05-31 18:54:24,341 P16920 INFO ************ Epoch=15 end ************
-2022-05-31 18:55:00,032 P16920 INFO [Metrics] AUC: 0.969771 - logloss: 0.208504
-2022-05-31 18:55:00,033 P16920 INFO Save best model: monitor(max): 0.969771
-2022-05-31 18:55:00,043 P16920 INFO --- 343/343 batches finished ---
-2022-05-31 18:55:00,095 P16920 INFO Train loss: 0.191585
-2022-05-31 18:55:00,096 P16920 INFO ************ Epoch=16 end ************
-2022-05-31 18:55:31,185 P16920 INFO [Metrics] AUC: 0.968663 - logloss: 0.228933
-2022-05-31 18:55:31,186 P16920 INFO Monitor(max) STOP: 0.968663 !
-2022-05-31 18:55:31,186 P16920 INFO Reduce learning rate on plateau: 0.000010
-2022-05-31 18:55:31,186 P16920 INFO --- 343/343 batches finished ---
-2022-05-31 18:55:31,235 P16920 INFO Train loss: 0.148550
-2022-05-31 18:55:31,236 P16920 INFO ************ Epoch=17 end ************
-2022-05-31 18:55:57,231 P16920 INFO [Metrics] AUC: 0.968163 - logloss: 0.243116
-2022-05-31 18:55:57,232 P16920 INFO Monitor(max) STOP: 0.968163 !
-2022-05-31 18:55:57,232 P16920 INFO Reduce learning rate on plateau: 0.000001
-2022-05-31 18:55:57,232 P16920 INFO Early stopping at epoch=18
-2022-05-31 18:55:57,233 P16920 INFO --- 343/343 batches finished ---
-2022-05-31 18:55:57,330 P16920 INFO Train loss: 0.116393
-2022-05-31 18:55:57,330 P16920 INFO Training finished.
-2022-05-31 18:55:57,331 P16920 INFO Load best model: /home/XXX/FuxiCTR/benchmarks/Movielens/AOANet_movielenslatest_x1/movielenslatest_x1_cd32d937/AOANet_movielenslatest_x1_031_bbf8c17a.model
-2022-05-31 18:55:57,354 P16920 INFO ****** Validation evaluation ******
-2022-05-31 18:55:59,102 P16920 INFO [Metrics] AUC: 0.969771 - logloss: 0.208504
-2022-05-31 18:55:59,149 P16920 INFO ******** Test evaluation ********
-2022-05-31 18:55:59,150 P16920 INFO Loading data...
-2022-05-31 18:55:59,150 P16920 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_cd32d937/test.h5
-2022-05-31 18:55:59,156 P16920 INFO Test samples: total/200686, pos/66850, neg/133836, ratio/33.31%, blocks/1
-2022-05-31 18:55:59,156 P16920 INFO Loading test data done.
-2022-05-31 18:56:00,022 P16920 INFO [Metrics] AUC: 0.969401 - logloss: 0.210458
-
-```
+## AOANet_movielenslatest_x1
+
+A hands-on guide to run the AOANet model on the MovielensLatest_x1 dataset.
+
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
+
+### Index
+[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
+
+### Environments
++ Hardware
+
+ ```python
+ CPU: Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.6GHz
+ GPU: Tesla P100 16G
+ RAM: 755G
+
+ ```
+
++ Software
+
+ ```python
+ CUDA: 11.4
+ python: 3.6.5
+ pytorch: 1.0.1.post2
+ pandas: 0.23.0
+ numpy: 1.18.1
+ scipy: 1.1.0
+ sklearn: 0.23.1
+ pyyaml: 5.1
+ h5py: 2.7.1
+ tqdm: 4.59.0
+ fuxictr: 1.2.1
+ ```
+
+### Dataset
+Dataset ID: [MovielensLatest_x1](https://github.com/reczoo/Datasets/tree/main/MovieLens/MovielensLatest_x1). Please refer to the dataset details to get data ready.
+
+### Code
+
+We use [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [AOANet](https://github.com/reczoo/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/AOANet.py).
+
+Running steps:
+
+1. Download [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+
+ ```python
+ sys.path.append('YOUR_PATH_TO_FuxiCTR/')
+ ```
+
+2. Create a data directory and put the downloaded csv files in `../data/Movielens/MovielensLatest_x1`.
+
+3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [AOANet_movielenslatest_x1_tuner_config_01](./AOANet_movielenslatest_x1_tuner_config_01). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
+
+4. Run the following script to start.
+
+ ```bash
+ cd AOANet_movielenslatest_x1
+ nohup python run_expid.py --config ./AOANet_movielenslatest_x1_tuner_config_01 --expid AOANet_movielenslatest_x1_031_bbf8c17a --gpu 0 > run.log &
+ tail -f run.log
+ ```
+
+### Results
+
+| AUC | logloss |
+|:--------------------:|:--------------------:|
+| 0.969401 | 0.210458 |
+
+
+### Logs
+```python
+2022-05-31 18:46:05,745 P16920 INFO {
+ "batch_norm": "True",
+ "batch_size": "4096",
+ "data_format": "csv",
+ "data_root": "../data/Movielens/",
+ "dataset_id": "movielenslatest_x1_cd32d937",
+ "debug": "False",
+ "dnn_hidden_activations": "ReLU",
+ "dnn_hidden_units": "[400, 400, 400]",
+ "embedding_dim": "10",
+ "embedding_regularizer": "0.01",
+ "epochs": "100",
+ "every_x_epochs": "1",
+ "feature_cols": "[{'active': True, 'dtype': 'float', 'name': ['user_id', 'item_id', 'tag_id'], 'type': 'categorical'}]",
+ "gpu": "0",
+ "label_col": "{'dtype': 'float', 'name': 'label'}",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "metrics": "['AUC', 'logloss']",
+ "min_categr_count": "1",
+ "model": "AOANet",
+ "model_id": "AOANet_movielenslatest_x1_031_bbf8c17a",
+ "model_root": "./Movielens/AOANet_movielenslatest_x1/",
+ "monitor": "AUC",
+ "monitor_mode": "max",
+ "net_dropout": "0.3",
+ "net_regularizer": "0",
+ "num_interaction_layers": "3",
+ "num_subspaces": "2",
+ "num_workers": "3",
+ "optimizer": "adam",
+ "patience": "2",
+ "pickle_feature_encoder": "True",
+ "save_best_only": "True",
+ "seed": "2021",
+ "shuffle": "True",
+ "task": "binary_classification",
+ "test_data": "../data/Movielens/MovielensLatest_x1/test.csv",
+ "train_data": "../data/Movielens/MovielensLatest_x1/train.csv",
+ "use_hdf5": "True",
+ "valid_data": "../data/Movielens/MovielensLatest_x1/valid.csv",
+ "verbose": "1",
+ "version": "pytorch"
+}
+2022-05-31 18:46:05,746 P16920 INFO Set up feature encoder...
+2022-05-31 18:46:05,746 P16920 INFO Load feature_map from json: ../data/Movielens/movielenslatest_x1_cd32d937/feature_map.json
+2022-05-31 18:46:05,746 P16920 INFO Loading data...
+2022-05-31 18:46:05,749 P16920 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_cd32d937/train.h5
+2022-05-31 18:46:05,777 P16920 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_cd32d937/valid.h5
+2022-05-31 18:46:05,785 P16920 INFO Train samples: total/1404801, pos/467878, neg/936923, ratio/33.31%, blocks/1
+2022-05-31 18:46:05,785 P16920 INFO Validation samples: total/401372, pos/134225, neg/267147, ratio/33.44%, blocks/1
+2022-05-31 18:46:05,785 P16920 INFO Loading train data done.
+2022-05-31 18:46:09,635 P16920 INFO Total number of parameters: 1239113.
+2022-05-31 18:46:09,635 P16920 INFO Start training: 343 batches/epoch
+2022-05-31 18:46:09,636 P16920 INFO ************ Epoch=1 start ************
+2022-05-31 18:46:41,510 P16920 INFO [Metrics] AUC: 0.933901 - logloss: 0.292700
+2022-05-31 18:46:41,511 P16920 INFO Save best model: monitor(max): 0.933901
+2022-05-31 18:46:41,520 P16920 INFO --- 343/343 batches finished ---
+2022-05-31 18:46:41,559 P16920 INFO Train loss: 0.377147
+2022-05-31 18:46:41,559 P16920 INFO ************ Epoch=1 end ************
+2022-05-31 18:47:13,297 P16920 INFO [Metrics] AUC: 0.944238 - logloss: 0.272118
+2022-05-31 18:47:13,299 P16920 INFO Save best model: monitor(max): 0.944238
+2022-05-31 18:47:13,310 P16920 INFO --- 343/343 batches finished ---
+2022-05-31 18:47:13,361 P16920 INFO Train loss: 0.361309
+2022-05-31 18:47:13,361 P16920 INFO ************ Epoch=2 end ************
+2022-05-31 18:47:45,052 P16920 INFO [Metrics] AUC: 0.948313 - logloss: 0.261492
+2022-05-31 18:47:45,053 P16920 INFO Save best model: monitor(max): 0.948313
+2022-05-31 18:47:45,064 P16920 INFO --- 343/343 batches finished ---
+2022-05-31 18:47:45,112 P16920 INFO Train loss: 0.362418
+2022-05-31 18:47:45,112 P16920 INFO ************ Epoch=3 end ************
+2022-05-31 18:48:16,983 P16920 INFO [Metrics] AUC: 0.950069 - logloss: 0.250828
+2022-05-31 18:48:16,984 P16920 INFO Save best model: monitor(max): 0.950069
+2022-05-31 18:48:16,995 P16920 INFO --- 343/343 batches finished ---
+2022-05-31 18:48:17,037 P16920 INFO Train loss: 0.365230
+2022-05-31 18:48:17,037 P16920 INFO ************ Epoch=4 end ************
+2022-05-31 18:48:48,797 P16920 INFO [Metrics] AUC: 0.951998 - logloss: 0.246179
+2022-05-31 18:48:48,798 P16920 INFO Save best model: monitor(max): 0.951998
+2022-05-31 18:48:48,809 P16920 INFO --- 343/343 batches finished ---
+2022-05-31 18:48:48,882 P16920 INFO Train loss: 0.368801
+2022-05-31 18:48:48,882 P16920 INFO ************ Epoch=5 end ************
+2022-05-31 18:49:16,351 P16920 INFO [Metrics] AUC: 0.952251 - logloss: 0.244044
+2022-05-31 18:49:16,352 P16920 INFO Save best model: monitor(max): 0.952251
+2022-05-31 18:49:16,362 P16920 INFO --- 343/343 batches finished ---
+2022-05-31 18:49:16,412 P16920 INFO Train loss: 0.371365
+2022-05-31 18:49:16,412 P16920 INFO ************ Epoch=6 end ************
+2022-05-31 18:49:48,759 P16920 INFO [Metrics] AUC: 0.953469 - logloss: 0.241305
+2022-05-31 18:49:48,760 P16920 INFO Save best model: monitor(max): 0.953469
+2022-05-31 18:49:48,768 P16920 INFO --- 343/343 batches finished ---
+2022-05-31 18:49:48,823 P16920 INFO Train loss: 0.373331
+2022-05-31 18:49:48,823 P16920 INFO ************ Epoch=7 end ************
+2022-05-31 18:50:21,229 P16920 INFO [Metrics] AUC: 0.953871 - logloss: 0.240481
+2022-05-31 18:50:21,230 P16920 INFO Save best model: monitor(max): 0.953871
+2022-05-31 18:50:21,238 P16920 INFO --- 343/343 batches finished ---
+2022-05-31 18:50:21,292 P16920 INFO Train loss: 0.374384
+2022-05-31 18:50:21,292 P16920 INFO ************ Epoch=8 end ************
+2022-05-31 18:50:50,300 P16920 INFO [Metrics] AUC: 0.954370 - logloss: 0.240454
+2022-05-31 18:50:50,301 P16920 INFO Save best model: monitor(max): 0.954370
+2022-05-31 18:50:50,313 P16920 INFO --- 343/343 batches finished ---
+2022-05-31 18:50:50,372 P16920 INFO Train loss: 0.374569
+2022-05-31 18:50:50,372 P16920 INFO ************ Epoch=9 end ************
+2022-05-31 18:51:26,099 P16920 INFO [Metrics] AUC: 0.954567 - logloss: 0.239315
+2022-05-31 18:51:26,100 P16920 INFO Save best model: monitor(max): 0.954567
+2022-05-31 18:51:26,110 P16920 INFO --- 343/343 batches finished ---
+2022-05-31 18:51:26,148 P16920 INFO Train loss: 0.374840
+2022-05-31 18:51:26,148 P16920 INFO ************ Epoch=10 end ************
+2022-05-31 18:52:01,834 P16920 INFO [Metrics] AUC: 0.955103 - logloss: 0.238806
+2022-05-31 18:52:01,836 P16920 INFO Save best model: monitor(max): 0.955103
+2022-05-31 18:52:01,847 P16920 INFO --- 343/343 batches finished ---
+2022-05-31 18:52:01,921 P16920 INFO Train loss: 0.375471
+2022-05-31 18:52:01,922 P16920 INFO ************ Epoch=11 end ************
+2022-05-31 18:52:37,467 P16920 INFO [Metrics] AUC: 0.955896 - logloss: 0.235422
+2022-05-31 18:52:37,468 P16920 INFO Save best model: monitor(max): 0.955896
+2022-05-31 18:52:37,478 P16920 INFO --- 343/343 batches finished ---
+2022-05-31 18:52:37,519 P16920 INFO Train loss: 0.376710
+2022-05-31 18:52:37,519 P16920 INFO ************ Epoch=12 end ************
+2022-05-31 18:53:13,017 P16920 INFO [Metrics] AUC: 0.956460 - logloss: 0.236233
+2022-05-31 18:53:13,017 P16920 INFO Save best model: monitor(max): 0.956460
+2022-05-31 18:53:13,025 P16920 INFO --- 343/343 batches finished ---
+2022-05-31 18:53:13,080 P16920 INFO Train loss: 0.375659
+2022-05-31 18:53:13,080 P16920 INFO ************ Epoch=13 end ************
+2022-05-31 18:53:48,775 P16920 INFO [Metrics] AUC: 0.956391 - logloss: 0.232838
+2022-05-31 18:53:48,776 P16920 INFO Monitor(max) STOP: 0.956391 !
+2022-05-31 18:53:48,777 P16920 INFO Reduce learning rate on plateau: 0.000100
+2022-05-31 18:53:48,777 P16920 INFO --- 343/343 batches finished ---
+2022-05-31 18:53:48,819 P16920 INFO Train loss: 0.376075
+2022-05-31 18:53:48,819 P16920 INFO ************ Epoch=14 end ************
+2022-05-31 18:54:24,283 P16920 INFO [Metrics] AUC: 0.967931 - logloss: 0.203813
+2022-05-31 18:54:24,283 P16920 INFO Save best model: monitor(max): 0.967931
+2022-05-31 18:54:24,291 P16920 INFO --- 343/343 batches finished ---
+2022-05-31 18:54:24,341 P16920 INFO Train loss: 0.276614
+2022-05-31 18:54:24,341 P16920 INFO ************ Epoch=15 end ************
+2022-05-31 18:55:00,032 P16920 INFO [Metrics] AUC: 0.969771 - logloss: 0.208504
+2022-05-31 18:55:00,033 P16920 INFO Save best model: monitor(max): 0.969771
+2022-05-31 18:55:00,043 P16920 INFO --- 343/343 batches finished ---
+2022-05-31 18:55:00,095 P16920 INFO Train loss: 0.191585
+2022-05-31 18:55:00,096 P16920 INFO ************ Epoch=16 end ************
+2022-05-31 18:55:31,185 P16920 INFO [Metrics] AUC: 0.968663 - logloss: 0.228933
+2022-05-31 18:55:31,186 P16920 INFO Monitor(max) STOP: 0.968663 !
+2022-05-31 18:55:31,186 P16920 INFO Reduce learning rate on plateau: 0.000010
+2022-05-31 18:55:31,186 P16920 INFO --- 343/343 batches finished ---
+2022-05-31 18:55:31,235 P16920 INFO Train loss: 0.148550
+2022-05-31 18:55:31,236 P16920 INFO ************ Epoch=17 end ************
+2022-05-31 18:55:57,231 P16920 INFO [Metrics] AUC: 0.968163 - logloss: 0.243116
+2022-05-31 18:55:57,232 P16920 INFO Monitor(max) STOP: 0.968163 !
+2022-05-31 18:55:57,232 P16920 INFO Reduce learning rate on plateau: 0.000001
+2022-05-31 18:55:57,232 P16920 INFO Early stopping at epoch=18
+2022-05-31 18:55:57,233 P16920 INFO --- 343/343 batches finished ---
+2022-05-31 18:55:57,330 P16920 INFO Train loss: 0.116393
+2022-05-31 18:55:57,330 P16920 INFO Training finished.
+2022-05-31 18:55:57,331 P16920 INFO Load best model: /home/XXX/FuxiCTR/benchmarks/Movielens/AOANet_movielenslatest_x1/movielenslatest_x1_cd32d937/AOANet_movielenslatest_x1_031_bbf8c17a.model
+2022-05-31 18:55:57,354 P16920 INFO ****** Validation evaluation ******
+2022-05-31 18:55:59,102 P16920 INFO [Metrics] AUC: 0.969771 - logloss: 0.208504
+2022-05-31 18:55:59,149 P16920 INFO ******** Test evaluation ********
+2022-05-31 18:55:59,150 P16920 INFO Loading data...
+2022-05-31 18:55:59,150 P16920 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_cd32d937/test.h5
+2022-05-31 18:55:59,156 P16920 INFO Test samples: total/200686, pos/66850, neg/133836, ratio/33.31%, blocks/1
+2022-05-31 18:55:59,156 P16920 INFO Loading test data done.
+2022-05-31 18:56:00,022 P16920 INFO [Metrics] AUC: 0.969401 - logloss: 0.210458
+
+```
diff --git a/ranking/ctr/AOANet/AOANet_taobaoad_x1/README.md b/ranking/ctr/AOANet/AOANet_taobaoad_x1/README.md
index 0c6f5eda..91b38b8e 100644
--- a/ranking/ctr/AOANet/AOANet_taobaoad_x1/README.md
+++ b/ranking/ctr/AOANet/AOANet_taobaoad_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AOANet model on the TaobaoAd_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [TaobaoAd_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/Taobao#TaobaoAd_x1) to get data ready.
+Please refer to [TaobaoAd_x1](https://github.com/reczoo/Datasets/tree/main/Taobao/TaobaoAd_x1) to get the dataset details.
### Code
-We use the [AOANet](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/AOANet) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [AOANet](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/AOANet) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/AutoInt/AutoInt+_amazonelectronics_x1/README.md b/ranking/ctr/AutoInt/AutoInt+_amazonelectronics_x1/README.md
index 31fc9a19..3b0cd361 100644
--- a/ranking/ctr/AutoInt/AutoInt+_amazonelectronics_x1/README.md
+++ b/ranking/ctr/AutoInt/AutoInt+_amazonelectronics_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AutoInt model on the AmazonElectronics_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [AmazonElectronics_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/Amazon#AmazonElectronics_x1) to get data ready.
+Please refer to [AmazonElectronics_x1](https://github.com/reczoo/Datasets/tree/main/Amazon/AmazonElectronics_x1) to get the dataset details.
### Code
-We use the [AutoInt](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/AutoInt) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [AutoInt](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/AutoInt) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/AutoInt/AutoInt+_avazu_x1/README.md b/ranking/ctr/AutoInt/AutoInt+_avazu_x1/README.md
index 6b7cadb3..9e122064 100644
--- a/ranking/ctr/AutoInt/AutoInt+_avazu_x1/README.md
+++ b/ranking/ctr/AutoInt/AutoInt+_avazu_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AutoInt model on the Avazu_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
@@ -40,11 +40,11 @@ Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pre
### Code
-We use [FuxiCTR-v1.2.2](https://github.com/xue-pai/FuxiCTR/tree/v1.2.2) for this experiment. See the model code: [AutoInt](https://github.com/xue-pai/FuxiCTR/blob/v1.2.2/fuxictr/pytorch/models/AutoInt.py).
+We use [FuxiCTR-v1.2.2](https://github.com/reczoo/FuxiCTR/tree/v1.2.2) for this experiment. See the model code: [AutoInt](https://github.com/reczoo/FuxiCTR/blob/v1.2.2/fuxictr/pytorch/models/AutoInt.py).
Running steps:
-1. Download [FuxiCTR-v1.2.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [fuxictr_version.py](./fuxictr_version.py#L3) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.2.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [fuxictr_version.py](./fuxictr_version.py#L3) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
@@ -178,4 +178,4 @@ Running steps:
Revision History
-- [Version 1](https://github.com/openbenchmark/BARS/tree/88d3a0faa4565e975141ae89a52d35d3a8b56eda/ctr_prediction/benchmarks/AutoInt/AutoInt%2B_avazu_x1#autoint_avazu_x1): deprecated due to bug fix [#30](https://github.com/xue-pai/FuxiCTR/issues/30).
+- [Version 1](https://github.com/openbenchmark/BARS/tree/88d3a0faa4565e975141ae89a52d35d3a8b56eda/ctr_prediction/benchmarks/AutoInt/AutoInt%2B_avazu_x1#autoint_avazu_x1): deprecated due to bug fix [#30](https://github.com/reczoo/FuxiCTR/issues/30).
diff --git a/ranking/ctr/AutoInt/AutoInt+_avazu_x4_001/README.md b/ranking/ctr/AutoInt/AutoInt+_avazu_x4_001/README.md
index 01560b8c..78807e4e 100644
--- a/ranking/ctr/AutoInt/AutoInt+_avazu_x4_001/README.md
+++ b/ranking/ctr/AutoInt/AutoInt+_avazu_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AutoInt model on the Avazu_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AutoInt](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AutoInt.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AutoInt](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AutoInt.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AutoInt/AutoInt+_avazu_x4_002/README.md b/ranking/ctr/AutoInt/AutoInt+_avazu_x4_002/README.md
index 050c1172..dbdd2896 100644
--- a/ranking/ctr/AutoInt/AutoInt+_avazu_x4_002/README.md
+++ b/ranking/ctr/AutoInt/AutoInt+_avazu_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AutoInt model on the Avazu_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AutoInt](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AutoInt.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AutoInt](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AutoInt.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AutoInt/AutoInt+_criteo_x1/README.md b/ranking/ctr/AutoInt/AutoInt+_criteo_x1/README.md
index 6cf9c58f..67ca00d4 100644
--- a/ranking/ctr/AutoInt/AutoInt+_criteo_x1/README.md
+++ b/ranking/ctr/AutoInt/AutoInt+_criteo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AutoInt model on the Criteo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
@@ -40,11 +40,11 @@ Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pr
### Code
-We use [FuxiCTR-v1.2.2](https://github.com/xue-pai/FuxiCTR/tree/v1.2.2) for this experiment. See the model code: [AutoInt](https://github.com/xue-pai/FuxiCTR/blob/v1.2.2/fuxictr/pytorch/models/AutoInt.py).
+We use [FuxiCTR-v1.2.2](https://github.com/reczoo/FuxiCTR/tree/v1.2.2) for this experiment. See the model code: [AutoInt](https://github.com/reczoo/FuxiCTR/blob/v1.2.2/fuxictr/pytorch/models/AutoInt.py).
Running steps:
-1. Download [FuxiCTR-v1.2.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [fuxictr_version.py](./fuxictr_version.py#L3) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.2.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [fuxictr_version.py](./fuxictr_version.py#L3) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
@@ -283,4 +283,4 @@ Running steps:
### Revision History
-- [Version 1](https://github.com/openbenchmark/BARS/tree/88d3a0faa4565e975141ae89a52d35d3a8b56eda/ctr_prediction/benchmarks/AutoInt/AutoInt%2B_criteo_x1#autoint_criteo_x1): deprecated due to bug fix [#30](https://github.com/xue-pai/FuxiCTR/issues/30) of FuxiCTR.
+- [Version 1](https://github.com/openbenchmark/BARS/tree/88d3a0faa4565e975141ae89a52d35d3a8b56eda/ctr_prediction/benchmarks/AutoInt/AutoInt%2B_criteo_x1#autoint_criteo_x1): deprecated due to bug fix [#30](https://github.com/reczoo/FuxiCTR/issues/30) of FuxiCTR.
diff --git a/ranking/ctr/AutoInt/AutoInt+_criteo_x4_001/README.md b/ranking/ctr/AutoInt/AutoInt+_criteo_x4_001/README.md
index c5edeca8..14532c2d 100644
--- a/ranking/ctr/AutoInt/AutoInt+_criteo_x4_001/README.md
+++ b/ranking/ctr/AutoInt/AutoInt+_criteo_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AutoInt model on the Criteo_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AutoInt](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AutoInt.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AutoInt](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AutoInt.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AutoInt/AutoInt+_criteo_x4_002/README.md b/ranking/ctr/AutoInt/AutoInt+_criteo_x4_002/README.md
index b0bb8b3b..e08b1900 100644
--- a/ranking/ctr/AutoInt/AutoInt+_criteo_x4_002/README.md
+++ b/ranking/ctr/AutoInt/AutoInt+_criteo_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AutoInt model on the Criteo_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AutoInt](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AutoInt.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AutoInt](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AutoInt.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AutoInt/AutoInt+_frappe_x1/README.md b/ranking/ctr/AutoInt/AutoInt+_frappe_x1/README.md
index 84648cb4..c4ea9d32 100644
--- a/ranking/ctr/AutoInt/AutoInt+_frappe_x1/README.md
+++ b/ranking/ctr/AutoInt/AutoInt+_frappe_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AutoInt model on the Frappe_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
@@ -40,11 +40,11 @@ Dataset ID: [Frappe_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pr
### Code
-We use [FuxiCTR-v1.2.2](https://github.com/xue-pai/FuxiCTR/tree/v1.2.2) for this experiment. See the model code: [AutoInt](https://github.com/xue-pai/FuxiCTR/blob/v1.2.2/fuxictr/pytorch/models/AutoInt.py).
+We use [FuxiCTR-v1.2.2](https://github.com/reczoo/FuxiCTR/tree/v1.2.2) for this experiment. See the model code: [AutoInt](https://github.com/reczoo/FuxiCTR/blob/v1.2.2/fuxictr/pytorch/models/AutoInt.py).
Running steps:
-1. Download [FuxiCTR-v1.2.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [fuxictr_version.py](./fuxictr_version.py#L3) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.2.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [fuxictr_version.py](./fuxictr_version.py#L3) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
@@ -223,4 +223,4 @@ Running steps:
### Revision History
-- [Version 1](https://github.com/openbenchmark/BARS/tree/88d3a0faa4565e975141ae89a52d35d3a8b56eda/ctr_prediction/benchmarks/AutoInt/AutoInt_frappe_x1#autoint_frappe_x1): deprecated due to bug fix [#30](https://github.com/xue-pai/FuxiCTR/issues/30) of FuxiCTR.
+- [Version 1](https://github.com/openbenchmark/BARS/tree/88d3a0faa4565e975141ae89a52d35d3a8b56eda/ctr_prediction/benchmarks/AutoInt/AutoInt_frappe_x1#autoint_frappe_x1): deprecated due to bug fix [#30](https://github.com/reczoo/FuxiCTR/issues/30) of FuxiCTR.
diff --git a/ranking/ctr/AutoInt/AutoInt+_kkbox_x1/README.md b/ranking/ctr/AutoInt/AutoInt+_kkbox_x1/README.md
index f2da4851..e31f14e9 100644
--- a/ranking/ctr/AutoInt/AutoInt+_kkbox_x1/README.md
+++ b/ranking/ctr/AutoInt/AutoInt+_kkbox_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AutoInt model on the KKBox_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -38,11 +38,11 @@ Dataset ID: [KKBox_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pre
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AutoInt](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AutoInt.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AutoInt](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AutoInt.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AutoInt/AutoInt+_kuaivideo_x1/README.md b/ranking/ctr/AutoInt/AutoInt+_kuaivideo_x1/README.md
index 3daa703c..f1f0d0f4 100644
--- a/ranking/ctr/AutoInt/AutoInt+_kuaivideo_x1/README.md
+++ b/ranking/ctr/AutoInt/AutoInt+_kuaivideo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AutoInt model on the KuaiVideo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [KuaiVideo_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/KuaiShou#KuaiVideo_x1) to get data ready.
+Please refer to [KuaiVideo_x1](https://github.com/reczoo/Datasets/tree/main/KuaiShou/KuaiVideo_x1) to get the dataset details.
### Code
-We use the [AutoInt](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/AutoInt) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [AutoInt](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/AutoInt) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/AutoInt/AutoInt+_microvideo1.7m_x1/README.md b/ranking/ctr/AutoInt/AutoInt+_microvideo1.7m_x1/README.md
index acec04c8..41dc1876 100644
--- a/ranking/ctr/AutoInt/AutoInt+_microvideo1.7m_x1/README.md
+++ b/ranking/ctr/AutoInt/AutoInt+_microvideo1.7m_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AutoInt model on the MicroVideo1.7M_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [MicroVideo1.7M_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/MicroVideo1.7M#MicroVideo17M_x1) to get data ready.
+Please refer to [MicroVideo1.7M_x1](https://github.com/reczoo/Datasets/tree/main/MicroVideo/MicroVideo1.7M_x1) to get the dataset details.
### Code
-We use the [AutoInt](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/AutoInt) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [AutoInt](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/AutoInt) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/AutoInt/AutoInt+_movielenslatest_x1/README.md b/ranking/ctr/AutoInt/AutoInt+_movielenslatest_x1/README.md
index f39adf7e..4860717d 100644
--- a/ranking/ctr/AutoInt/AutoInt+_movielenslatest_x1/README.md
+++ b/ranking/ctr/AutoInt/AutoInt+_movielenslatest_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AutoInt model on the Movielenslatest_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
@@ -40,11 +40,11 @@ Dataset ID: [Movielenslatest_x1](https://github.com/openbenchmark/BARS/blob/mast
### Code
-We use [FuxiCTR-v1.2.2](https://github.com/xue-pai/FuxiCTR/tree/v1.2.2) for this experiment. See the model code: [AutoInt](https://github.com/xue-pai/FuxiCTR/blob/v1.2.2/fuxictr/pytorch/models/AutoInt.py).
+We use [FuxiCTR-v1.2.2](https://github.com/reczoo/FuxiCTR/tree/v1.2.2) for this experiment. See the model code: [AutoInt](https://github.com/reczoo/FuxiCTR/blob/v1.2.2/fuxictr/pytorch/models/AutoInt.py).
Running steps:
-1. Download [FuxiCTR-v1.2.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [fuxictr_version.py](./fuxictr_version.py#L3) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.2.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [fuxictr_version.py](./fuxictr_version.py#L3) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
@@ -208,4 +208,4 @@ Running steps:
### Revision History
-- [Version 1](https://github.com/openbenchmark/BARS/tree/88d3a0faa4565e975141ae89a52d35d3a8b56eda/ctr_prediction/benchmarks/AutoInt/AutoInt%2B_movielenslatest_x1#autoint_movielenslatest_x1): deprecated due to bug fix [#30](https://github.com/xue-pai/FuxiCTR/issues/30) of FuxiCTR.
+- [Version 1](https://github.com/openbenchmark/BARS/tree/88d3a0faa4565e975141ae89a52d35d3a8b56eda/ctr_prediction/benchmarks/AutoInt/AutoInt%2B_movielenslatest_x1#autoint_movielenslatest_x1): deprecated due to bug fix [#30](https://github.com/reczoo/FuxiCTR/issues/30) of FuxiCTR.
diff --git a/ranking/ctr/AutoInt/AutoInt+_taobaoad_x1/README.md b/ranking/ctr/AutoInt/AutoInt+_taobaoad_x1/README.md
index ad70b184..c98e62cb 100644
--- a/ranking/ctr/AutoInt/AutoInt+_taobaoad_x1/README.md
+++ b/ranking/ctr/AutoInt/AutoInt+_taobaoad_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AutoInt model on the TaobaoAd_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [TaobaoAd_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/Taobao#TaobaoAd_x1) to get data ready.
+Please refer to [TaobaoAd_x1](https://github.com/reczoo/Datasets/tree/main/Taobao/TaobaoAd_x1) to get the dataset details.
### Code
-We use the [AutoInt](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/AutoInt) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [AutoInt](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/AutoInt) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/AutoInt/AutoInt_avazu_x1/README.md b/ranking/ctr/AutoInt/AutoInt_avazu_x1/README.md
index 4d248791..0bd06f7d 100644
--- a/ranking/ctr/AutoInt/AutoInt_avazu_x1/README.md
+++ b/ranking/ctr/AutoInt/AutoInt_avazu_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AutoInt model on the Avazu_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
@@ -40,11 +40,11 @@ Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pre
### Code
-We use [FuxiCTR-v1.2.2](https://github.com/xue-pai/FuxiCTR/tree/v1.2.2) for this experiment. See the model code: [AutoInt](https://github.com/xue-pai/FuxiCTR/blob/v1.2.2/fuxictr/pytorch/models/AutoInt.py).
+We use [FuxiCTR-v1.2.2](https://github.com/reczoo/FuxiCTR/tree/v1.2.2) for this experiment. See the model code: [AutoInt](https://github.com/reczoo/FuxiCTR/blob/v1.2.2/fuxictr/pytorch/models/AutoInt.py).
Running steps:
-1. Download [FuxiCTR-v1.2.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [fuxictr_version.py](./fuxictr_version.py#L3) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.2.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [fuxictr_version.py](./fuxictr_version.py#L3) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
@@ -178,4 +178,4 @@ Running steps:
### Revision History
-+ [Version 1](https://github.com/openbenchmark/BARS/tree/88d3a0faa4565e975141ae89a52d35d3a8b56eda/ctr_prediction/benchmarks/AutoInt/AutoInt_avazu_x1#autoint_avazu_x1): deprecated due to bug fix [#30](https://github.com/xue-pai/FuxiCTR/issues/30) of FuxiCTR.
++ [Version 1](https://github.com/openbenchmark/BARS/tree/88d3a0faa4565e975141ae89a52d35d3a8b56eda/ctr_prediction/benchmarks/AutoInt/AutoInt_avazu_x1#autoint_avazu_x1): deprecated due to bug fix [#30](https://github.com/reczoo/FuxiCTR/issues/30) of FuxiCTR.
diff --git a/ranking/ctr/AutoInt/AutoInt_avazu_x4_001/README.md b/ranking/ctr/AutoInt/AutoInt_avazu_x4_001/README.md
index 64371227..dbd16765 100644
--- a/ranking/ctr/AutoInt/AutoInt_avazu_x4_001/README.md
+++ b/ranking/ctr/AutoInt/AutoInt_avazu_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AutoInt model on the Avazu_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AutoInt](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AutoInt.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AutoInt](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AutoInt.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AutoInt/AutoInt_avazu_x4_002/README.md b/ranking/ctr/AutoInt/AutoInt_avazu_x4_002/README.md
index 69d329d0..c414d95f 100644
--- a/ranking/ctr/AutoInt/AutoInt_avazu_x4_002/README.md
+++ b/ranking/ctr/AutoInt/AutoInt_avazu_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AutoInt model on the Avazu_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AutoInt](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AutoInt.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AutoInt](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AutoInt.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AutoInt/AutoInt_criteo_x1/README.md b/ranking/ctr/AutoInt/AutoInt_criteo_x1/README.md
index 548900ae..362a21fc 100644
--- a/ranking/ctr/AutoInt/AutoInt_criteo_x1/README.md
+++ b/ranking/ctr/AutoInt/AutoInt_criteo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AutoInt model on the Criteo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
@@ -40,11 +40,11 @@ Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pr
### Code
-We use [FuxiCTR-v1.2.2](https://github.com/xue-pai/FuxiCTR/tree/v1.2.2) for this experiment. See the model code: [AutoInt](https://github.com/xue-pai/FuxiCTR/blob/v1.2.2/fuxictr/pytorch/models/AutoInt.py).
+We use [FuxiCTR-v1.2.2](https://github.com/reczoo/FuxiCTR/tree/v1.2.2) for this experiment. See the model code: [AutoInt](https://github.com/reczoo/FuxiCTR/blob/v1.2.2/fuxictr/pytorch/models/AutoInt.py).
Running steps:
-1. Download [FuxiCTR-v1.2.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [fuxictr_version.py](./fuxictr_version.py#L3) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.2.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [fuxictr_version.py](./fuxictr_version.py#L3) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
@@ -218,4 +218,4 @@ Running steps:
### Revision History
-- [Version 1](https://github.com/openbenchmark/BARS/tree/88d3a0faa4565e975141ae89a52d35d3a8b56eda/ctr_prediction/benchmarks/AutoInt/AutoInt_criteo_x1#autoint_criteo_x1): deprecated due to bug fix [#30](https://github.com/xue-pai/FuxiCTR/issues/30) of FuxiCTR.
+- [Version 1](https://github.com/openbenchmark/BARS/tree/88d3a0faa4565e975141ae89a52d35d3a8b56eda/ctr_prediction/benchmarks/AutoInt/AutoInt_criteo_x1#autoint_criteo_x1): deprecated due to bug fix [#30](https://github.com/reczoo/FuxiCTR/issues/30) of FuxiCTR.
diff --git a/ranking/ctr/AutoInt/AutoInt_criteo_x4_001/README.md b/ranking/ctr/AutoInt/AutoInt_criteo_x4_001/README.md
index af2e7322..b219f91a 100644
--- a/ranking/ctr/AutoInt/AutoInt_criteo_x4_001/README.md
+++ b/ranking/ctr/AutoInt/AutoInt_criteo_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AutoInt model on the Criteo_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AutoInt](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AutoInt.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AutoInt](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AutoInt.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AutoInt/AutoInt_criteo_x4_002/README.md b/ranking/ctr/AutoInt/AutoInt_criteo_x4_002/README.md
index 89c86489..76da7562 100644
--- a/ranking/ctr/AutoInt/AutoInt_criteo_x4_002/README.md
+++ b/ranking/ctr/AutoInt/AutoInt_criteo_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AutoInt model on the Criteo_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AutoInt](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AutoInt.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AutoInt](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AutoInt.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AutoInt/AutoInt_frappe_x1/README.md b/ranking/ctr/AutoInt/AutoInt_frappe_x1/README.md
index 80d18a21..6ea0d638 100644
--- a/ranking/ctr/AutoInt/AutoInt_frappe_x1/README.md
+++ b/ranking/ctr/AutoInt/AutoInt_frappe_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AutoInt model on the Frappe_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
@@ -40,11 +40,11 @@ Dataset ID: [Frappe_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pr
### Code
-We use [FuxiCTR-v1.2.2](https://github.com/xue-pai/FuxiCTR/tree/v1.2.2) for this experiment. See the model code: [AutoInt](https://github.com/xue-pai/FuxiCTR/blob/v1.2.2/fuxictr/pytorch/models/AutoInt.py).
+We use [FuxiCTR-v1.2.2](https://github.com/reczoo/FuxiCTR/tree/v1.2.2) for this experiment. See the model code: [AutoInt](https://github.com/reczoo/FuxiCTR/blob/v1.2.2/fuxictr/pytorch/models/AutoInt.py).
Running steps:
-1. Download [FuxiCTR-v1.2.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [fuxictr_version.py](./fuxictr_version.py#L3) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.2.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [fuxictr_version.py](./fuxictr_version.py#L3) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
@@ -303,4 +303,4 @@ Running steps:
### Revision History
-- [Version 1](https://github.com/openbenchmark/BARS/tree/88d3a0faa4565e975141ae89a52d35d3a8b56eda/ctr_prediction/benchmarks/AutoInt/AutoInt_frappe_x1#autoint_frappe_x1): deprecated due to bug fix [#30](https://github.com/xue-pai/FuxiCTR/issues/30) of FuxiCTR.
+- [Version 1](https://github.com/openbenchmark/BARS/tree/88d3a0faa4565e975141ae89a52d35d3a8b56eda/ctr_prediction/benchmarks/AutoInt/AutoInt_frappe_x1#autoint_frappe_x1): deprecated due to bug fix [#30](https://github.com/reczoo/FuxiCTR/issues/30) of FuxiCTR.
diff --git a/ranking/ctr/AutoInt/AutoInt_kkbox_x1/README.md b/ranking/ctr/AutoInt/AutoInt_kkbox_x1/README.md
index 2ccf46a6..040fada8 100644
--- a/ranking/ctr/AutoInt/AutoInt_kkbox_x1/README.md
+++ b/ranking/ctr/AutoInt/AutoInt_kkbox_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AutoInt model on the KKBox_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [KKBox_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/KKBox/README.md#KKBox_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [KKBox_x1](https://github.com/reczoo/Datasets/tree/main/KKBox/KKBox_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AutoInt](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AutoInt.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [AutoInt](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/AutoInt.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/AutoInt/AutoInt_movielenslatest_x1/README.md b/ranking/ctr/AutoInt/AutoInt_movielenslatest_x1/README.md
index c5df0877..3001e6a2 100644
--- a/ranking/ctr/AutoInt/AutoInt_movielenslatest_x1/README.md
+++ b/ranking/ctr/AutoInt/AutoInt_movielenslatest_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the AutoInt model on the Movielenslatest_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
@@ -40,11 +40,11 @@ Dataset ID: [Movielenslatest_x1](https://github.com/openbenchmark/BARS/blob/mast
### Code
-We use [FuxiCTR-v1.2.2](https://github.com/xue-pai/FuxiCTR/tree/v1.2.2) for this experiment. See the model code: [AutoInt](https://github.com/xue-pai/FuxiCTR/blob/v1.2.2/fuxictr/pytorch/models/AutoInt.py).
+We use [FuxiCTR-v1.2.2](https://github.com/reczoo/FuxiCTR/tree/v1.2.2) for this experiment. See the model code: [AutoInt](https://github.com/reczoo/FuxiCTR/blob/v1.2.2/fuxictr/pytorch/models/AutoInt.py).
Running steps:
-1. Download [FuxiCTR-v1.2.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [fuxictr_version.py](./fuxictr_version.py#L3) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.2.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [fuxictr_version.py](./fuxictr_version.py#L3) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
@@ -223,4 +223,4 @@ Running steps:
### Revision History
-- [Version 1](https://github.com/openbenchmark/BARS/tree/88d3a0faa4565e975141ae89a52d35d3a8b56eda/ctr_prediction/benchmarks/AutoInt/AutoInt_movielenslatest_x1#autoint_movielenslatest_x1): deprecated due to bug fix [#30](https://github.com/xue-pai/FuxiCTR/issues/30) of FuxiCTR.
+- [Version 1](https://github.com/openbenchmark/BARS/tree/88d3a0faa4565e975141ae89a52d35d3a8b56eda/ctr_prediction/benchmarks/AutoInt/AutoInt_movielenslatest_x1#autoint_movielenslatest_x1): deprecated due to bug fix [#30](https://github.com/reczoo/FuxiCTR/issues/30) of FuxiCTR.
diff --git a/ranking/ctr/BST/BST_amazonelectronics_x1/README.md b/ranking/ctr/BST/BST_amazonelectronics_x1/README.md
index f826e696..75874454 100644
--- a/ranking/ctr/BST/BST_amazonelectronics_x1/README.md
+++ b/ranking/ctr/BST/BST_amazonelectronics_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the BST model on the AmazonElectronics_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [AmazonElectronics_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/Amazon#AmazonElectronics_x1) to get data ready.
+Please refer to [AmazonElectronics_x1](https://github.com/reczoo/Datasets/tree/main/Amazon/AmazonElectronics_x1) to get the dataset details.
### Code
-We use the [BST](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/BST) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [BST](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/BST) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/BST/BST_kuaivideo_x1/README.md b/ranking/ctr/BST/BST_kuaivideo_x1/README.md
index 8325e897..f1e68384 100644
--- a/ranking/ctr/BST/BST_kuaivideo_x1/README.md
+++ b/ranking/ctr/BST/BST_kuaivideo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the BST model on the KuaiVideo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [KuaiVideo_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/KuaiShou#KuaiVideo_x1) to get data ready.
+Please refer to [KuaiVideo_x1](https://github.com/reczoo/Datasets/tree/main/KuaiShou/KuaiVideo_x1) to get the dataset details.
### Code
-We use the [BST](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/BST) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [BST](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/BST) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/BST/BST_microvideo1.7m_x1/README.md b/ranking/ctr/BST/BST_microvideo1.7m_x1/README.md
index 42dc2245..e75948bd 100644
--- a/ranking/ctr/BST/BST_microvideo1.7m_x1/README.md
+++ b/ranking/ctr/BST/BST_microvideo1.7m_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the BST model on the MicroVideo1.7M_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [MicroVideo1.7M_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/MicroVideo1.7M#MicroVideo17M_x1) to get data ready.
+Please refer to [MicroVideo1.7M_x1](https://github.com/reczoo/Datasets/tree/main/MicroVideo/MicroVideo1.7M_x1) to get the dataset details.
### Code
-We use the [BST](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/BST) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [BST](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/BST) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/BST/BST_taobaoad_x1/README.md b/ranking/ctr/BST/BST_taobaoad_x1/README.md
index 66f0e54d..b3c713f5 100644
--- a/ranking/ctr/BST/BST_taobaoad_x1/README.md
+++ b/ranking/ctr/BST/BST_taobaoad_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the BST model on the TaobaoAd_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [TaobaoAd_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/Taobao#TaobaoAd_x1) to get data ready.
+Please refer to [TaobaoAd_x1](https://github.com/reczoo/Datasets/tree/main/Taobao/TaobaoAd_x1) to get the dataset details.
### Code
-We use the [BST](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/BST) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [BST](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/BST) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/CCPM/CCPM_avazu_x4_001/README.md b/ranking/ctr/CCPM/CCPM_avazu_x4_001/README.md
index abdd6789..7baaebea 100644
--- a/ranking/ctr/CCPM/CCPM_avazu_x4_001/README.md
+++ b/ranking/ctr/CCPM/CCPM_avazu_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the CCPM model on the Avazu_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [CCPM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/CCPM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [CCPM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/CCPM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/CCPM/CCPM_avazu_x4_002/README.md b/ranking/ctr/CCPM/CCPM_avazu_x4_002/README.md
index ca34585d..a6748f70 100644
--- a/ranking/ctr/CCPM/CCPM_avazu_x4_002/README.md
+++ b/ranking/ctr/CCPM/CCPM_avazu_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the CCPM model on the Avazu_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [CCPM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/CCPM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [CCPM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/CCPM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/CCPM/CCPM_criteo_x4_001/README.md b/ranking/ctr/CCPM/CCPM_criteo_x4_001/README.md
index f57221c5..7fc08929 100644
--- a/ranking/ctr/CCPM/CCPM_criteo_x4_001/README.md
+++ b/ranking/ctr/CCPM/CCPM_criteo_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the CCPM model on the Criteo_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [CCPM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/CCPM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [CCPM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/CCPM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/CCPM/CCPM_criteo_x4_002/README.md b/ranking/ctr/CCPM/CCPM_criteo_x4_002/README.md
index c5732303..b6d45db2 100644
--- a/ranking/ctr/CCPM/CCPM_criteo_x4_002/README.md
+++ b/ranking/ctr/CCPM/CCPM_criteo_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the CCPM model on the Criteo_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [CCPM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/CCPM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [CCPM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/CCPM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DCN/CrossNet_avazu_x1/README.md b/ranking/ctr/DCN/CrossNet_avazu_x1/README.md
index c10fb97c..7c288985 100644
--- a/ranking/ctr/DCN/CrossNet_avazu_x1/README.md
+++ b/ranking/ctr/DCN/CrossNet_avazu_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DCN model on the Avazu_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x1](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DCN](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DCN.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DCN](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DCN.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DCN/CrossNet_avazu_x4_001/README.md b/ranking/ctr/DCN/CrossNet_avazu_x4_001/README.md
index 7bc4c448..8fe64912 100644
--- a/ranking/ctr/DCN/CrossNet_avazu_x4_001/README.md
+++ b/ranking/ctr/DCN/CrossNet_avazu_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DCN model on the Avazu_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DCN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DCN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DCN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DCN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DCN/CrossNet_avazu_x4_002/README.md b/ranking/ctr/DCN/CrossNet_avazu_x4_002/README.md
index 7d6400f2..1f70821c 100644
--- a/ranking/ctr/DCN/CrossNet_avazu_x4_002/README.md
+++ b/ranking/ctr/DCN/CrossNet_avazu_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DCN model on the Avazu_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DCN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DCN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DCN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DCN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DCN/CrossNet_criteo_x1/README.md b/ranking/ctr/DCN/CrossNet_criteo_x1/README.md
index ca87168d..60843d9b 100644
--- a/ranking/ctr/DCN/CrossNet_criteo_x1/README.md
+++ b/ranking/ctr/DCN/CrossNet_criteo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DCN model on the Criteo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -38,11 +38,11 @@ Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pr
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DCN](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DCN.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DCN](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DCN.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DCN/CrossNet_criteo_x4_001/README.md b/ranking/ctr/DCN/CrossNet_criteo_x4_001/README.md
index b33ffedd..9d2c2c3f 100644
--- a/ranking/ctr/DCN/CrossNet_criteo_x4_001/README.md
+++ b/ranking/ctr/DCN/CrossNet_criteo_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DCN model on the Criteo_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DCN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DCN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DCN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DCN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DCN/CrossNet_criteo_x4_002/README.md b/ranking/ctr/DCN/CrossNet_criteo_x4_002/README.md
index 0fabd263..b9ad4e0a 100644
--- a/ranking/ctr/DCN/CrossNet_criteo_x4_002/README.md
+++ b/ranking/ctr/DCN/CrossNet_criteo_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DCN model on the Criteo_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DCN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DCN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DCN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DCN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DCN/CrossNet_frappe_x1/README.md b/ranking/ctr/DCN/CrossNet_frappe_x1/README.md
index 3df1d8e0..bc73737d 100644
--- a/ranking/ctr/DCN/CrossNet_frappe_x1/README.md
+++ b/ranking/ctr/DCN/CrossNet_frappe_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DCN model on the Frappe_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Frappe_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Frappe/README.md#Frappe_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Frappe_x1](https://github.com/reczoo/Datasets/tree/main/Frappe/Frappe_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DCN](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DCN.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DCN](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DCN.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DCN/CrossNet_kkbox_x1/README.md b/ranking/ctr/DCN/CrossNet_kkbox_x1/README.md
index 1ec64aff..4b5f9894 100644
--- a/ranking/ctr/DCN/CrossNet_kkbox_x1/README.md
+++ b/ranking/ctr/DCN/CrossNet_kkbox_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DCN model on the KKBox_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -38,11 +38,11 @@ Dataset ID: [KKBox_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pre
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DCN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DCN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DCN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DCN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DCN/CrossNet_movielenslatest_x1/README.md b/ranking/ctr/DCN/CrossNet_movielenslatest_x1/README.md
index f1cbe1b9..acfb59f9 100644
--- a/ranking/ctr/DCN/CrossNet_movielenslatest_x1/README.md
+++ b/ranking/ctr/DCN/CrossNet_movielenslatest_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DCN model on the Movielenslatest_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Movielenslatest_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/MovieLens/README.md#Movielenslatest_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [MovielensLatest_x1](https://github.com/reczoo/Datasets/tree/main/MovieLens/MovielensLatest_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DCN](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DCN.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DCN](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DCN.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DCN/DCN_amazonelectronics_x1/README.md b/ranking/ctr/DCN/DCN_amazonelectronics_x1/README.md
index ca444c77..ba1b8af4 100644
--- a/ranking/ctr/DCN/DCN_amazonelectronics_x1/README.md
+++ b/ranking/ctr/DCN/DCN_amazonelectronics_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DCN model on the AmazonElectronics_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [AmazonElectronics_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/Amazon#AmazonElectronics_x1) to get data ready.
+Please refer to [AmazonElectronics_x1](https://github.com/reczoo/Datasets/tree/main/Amazon/AmazonElectronics_x1) to get the dataset details.
### Code
-We use the [DCN](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/DCN) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [DCN](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/DCN) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/DCN/DCN_avazu_x1/README.md b/ranking/ctr/DCN/DCN_avazu_x1/README.md
index 0aa6c166..9947eaca 100644
--- a/ranking/ctr/DCN/DCN_avazu_x1/README.md
+++ b/ranking/ctr/DCN/DCN_avazu_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DCN model on the Avazu_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x1](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DCN](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DCN.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DCN](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DCN.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DCN/DCN_avazu_x4_001/README.md b/ranking/ctr/DCN/DCN_avazu_x4_001/README.md
index c86695fb..8403ccf5 100644
--- a/ranking/ctr/DCN/DCN_avazu_x4_001/README.md
+++ b/ranking/ctr/DCN/DCN_avazu_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DCN model on the Avazu_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DCN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DCN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DCN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DCN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DCN/DCN_avazu_x4_002/README.md b/ranking/ctr/DCN/DCN_avazu_x4_002/README.md
index ed98c48d..364c9ed4 100644
--- a/ranking/ctr/DCN/DCN_avazu_x4_002/README.md
+++ b/ranking/ctr/DCN/DCN_avazu_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DCN model on the Avazu_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DCN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DCN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DCN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DCN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DCN/DCN_criteo_x1/README.md b/ranking/ctr/DCN/DCN_criteo_x1/README.md
index e4fde13e..054f7f05 100644
--- a/ranking/ctr/DCN/DCN_criteo_x1/README.md
+++ b/ranking/ctr/DCN/DCN_criteo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DCN model on the Criteo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x1](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DCN](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DCN.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DCN](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DCN.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DCN/DCN_criteo_x4_001/README.md b/ranking/ctr/DCN/DCN_criteo_x4_001/README.md
index 60fbed6d..c15ca49c 100644
--- a/ranking/ctr/DCN/DCN_criteo_x4_001/README.md
+++ b/ranking/ctr/DCN/DCN_criteo_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DCN model on the Criteo_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DCN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DCN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DCN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DCN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DCN/DCN_criteo_x4_002/README.md b/ranking/ctr/DCN/DCN_criteo_x4_002/README.md
index 99d639a0..c9bf03d3 100644
--- a/ranking/ctr/DCN/DCN_criteo_x4_002/README.md
+++ b/ranking/ctr/DCN/DCN_criteo_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DCN model on the Criteo_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DCN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DCN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DCN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DCN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DCN/DCN_frappe_x1/README.md b/ranking/ctr/DCN/DCN_frappe_x1/README.md
index 2804d30d..e04ce01b 100644
--- a/ranking/ctr/DCN/DCN_frappe_x1/README.md
+++ b/ranking/ctr/DCN/DCN_frappe_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DCN model on the Frappe_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Frappe_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Frappe/README.md#Frappe_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Frappe_x1](https://github.com/reczoo/Datasets/tree/main/Frappe/Frappe_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DCN](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DCN.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DCN](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DCN.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DCN/DCN_kkbox_x1/README.md b/ranking/ctr/DCN/DCN_kkbox_x1/README.md
index f17a76bd..f7beafcf 100644
--- a/ranking/ctr/DCN/DCN_kkbox_x1/README.md
+++ b/ranking/ctr/DCN/DCN_kkbox_x1/README.md
@@ -1,354 +1,354 @@
-## DCN_kkbox_x1
-
-A hands-on guide to run the DCN model on the KKBox_x1 dataset.
-
-Author: [XUEPAI](https://github.com/xue-pai)
-
-### Index
-[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
-
-### Environments
-+ Hardware
-
- ```python
- CPU: Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.60GHz
- GPU: Tesla P100 16G
- RAM: 755G
-
- ```
-
-+ Software
-
- ```python
- CUDA: 10.0
- python: 3.6.5
- pytorch: 1.0.1.post2
- pandas: 0.23.0
- numpy: 1.18.1
- scipy: 1.1.0
- sklearn: 0.23.1
- pyyaml: 5.1
- h5py: 2.7.1
- tqdm: 4.59.0
- fuxictr: 1.0.2
- ```
-
-### Dataset
-Dataset ID: [KKBox_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/KKBox/README.md#KKBox_x1). Please refer to the dataset details to get data ready.
-
-### Code
-
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DCN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DCN.py).
-
-Running steps:
-
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
-
- ```python
- sys.path.append('YOUR_PATH_TO_FuxiCTR/')
- ```
-
-2. Create a data directory and put the downloaded csv files in `../data/KKBox/KKBox_x1`.
-
-3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [DCN_kkbox_x1_tuner_config_05](./DCN_kkbox_x1_tuner_config_05). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
-
-4. Run the following script to start.
-
- ```bash
- cd DCN_kkbox_x1
- nohup python run_expid.py --config ./DCN_kkbox_x1_tuner_config_05 --expid DCN_kkbox_x1_005_362e6c13 --gpu 0 > run.log &
- tail -f run.log
- ```
-
-### Results
-
-| logloss | AUC |
-|:--------------------:|:--------------------:|
-| 0.476555 | 0.853114 |
-
-
-### Logs
-```python
-2022-03-01 14:55:49,635 P30229 INFO {
- "batch_norm": "False",
- "batch_size": "10000",
- "crossing_layers": "4",
- "data_format": "csv",
- "data_root": "../data/KKBox/",
- "dataset_id": "kkbox_x1_227d337d",
- "debug": "False",
- "dnn_activations": "relu",
- "dnn_hidden_units": "[5000, 5000]",
- "embedding_dim": "128",
- "embedding_dropout": "0",
- "embedding_regularizer": "0.0005",
- "epochs": "100",
- "every_x_epochs": "1",
- "feature_cols": "[{'active': True, 'dtype': 'str', 'name': ['msno', 'song_id', 'source_system_tab', 'source_screen_name', 'source_type', 'city', 'gender', 'registered_via', 'language'], 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'encoder': 'MaskedSumPooling', 'max_len': 3, 'name': 'genre_ids', 'type': 'sequence'}, {'active': True, 'dtype': 'str', 'encoder': 'MaskedSumPooling', 'max_len': 3, 'name': 'artist_name', 'type': 'sequence'}, {'active': True, 'dtype': 'str', 'name': 'isrc', 'preprocess': 'extract_country_code', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': 'bd', 'preprocess': 'bucketize_age', 'type': 'categorical'}]",
- "gpu": "1",
- "label_col": "{'dtype': 'float', 'name': 'label'}",
- "learning_rate": "0.001",
- "loss": "binary_crossentropy",
- "metrics": "['logloss', 'AUC']",
- "min_categr_count": "10",
- "model": "DCN",
- "model_id": "DCN_kkbox_x1_005_362e6c13",
- "model_root": "./KKBox/DCN_kkbox_x1/",
- "monitor": "{'AUC': 1, 'logloss': -1}",
- "monitor_mode": "max",
- "net_dropout": "0.4",
- "net_regularizer": "0",
- "optimizer": "adam",
- "patience": "2",
- "pickle_feature_encoder": "True",
- "save_best_only": "True",
- "seed": "2019",
- "shuffle": "True",
- "task": "binary_classification",
- "test_data": "../data/KKBox/KKBox_x1/test.csv",
- "train_data": "../data/KKBox/KKBox_x1/train.csv",
- "use_hdf5": "True",
- "valid_data": "../data/KKBox/KKBox_x1/valid.csv",
- "verbose": "1",
- "version": "pytorch",
- "workers": "3"
-}
-2022-03-01 14:55:49,635 P30229 INFO Set up feature encoder...
-2022-03-01 14:55:49,635 P30229 INFO Reading file: ../data/KKBox/KKBox_x1/train.csv
-2022-03-01 14:56:09,480 P30229 INFO Preprocess feature columns...
-2022-03-01 14:56:26,044 P30229 INFO Fit feature encoder...
-2022-03-01 14:56:26,045 P30229 INFO Processing column: {'active': True, 'dtype': 'str', 'name': 'msno', 'type': 'categorical'}
-2022-03-01 14:56:27,463 P30229 INFO Processing column: {'active': True, 'dtype': 'str', 'name': 'song_id', 'type': 'categorical'}
-2022-03-01 14:56:29,260 P30229 INFO Processing column: {'active': True, 'dtype': 'str', 'name': 'source_system_tab', 'type': 'categorical'}
-2022-03-01 14:56:30,176 P30229 INFO Processing column: {'active': True, 'dtype': 'str', 'name': 'source_screen_name', 'type': 'categorical'}
-2022-03-01 14:56:31,068 P30229 INFO Processing column: {'active': True, 'dtype': 'str', 'name': 'source_type', 'type': 'categorical'}
-2022-03-01 14:56:31,987 P30229 INFO Processing column: {'active': True, 'dtype': 'str', 'name': 'city', 'type': 'categorical'}
-2022-03-01 14:56:32,831 P30229 INFO Processing column: {'active': True, 'dtype': 'str', 'name': 'gender', 'type': 'categorical'}
-2022-03-01 14:56:33,549 P30229 INFO Processing column: {'active': True, 'dtype': 'str', 'name': 'registered_via', 'type': 'categorical'}
-2022-03-01 14:56:34,357 P30229 INFO Processing column: {'active': True, 'dtype': 'str', 'name': 'language', 'type': 'categorical'}
-2022-03-01 14:56:35,218 P30229 INFO Processing column: {'active': True, 'dtype': 'str', 'encoder': 'MaskedSumPooling', 'max_len': 3, 'name': 'genre_ids', 'type': 'sequence'}
-2022-03-01 14:56:45,163 P30229 INFO Processing column: {'active': True, 'dtype': 'str', 'encoder': 'MaskedSumPooling', 'max_len': 3, 'name': 'artist_name', 'type': 'sequence'}
-2022-03-01 14:56:55,444 P30229 INFO Processing column: {'active': True, 'dtype': 'str', 'name': 'isrc', 'preprocess': 'extract_country_code', 'type': 'categorical'}
-2022-03-01 14:56:56,635 P30229 INFO Processing column: {'active': True, 'dtype': 'str', 'name': 'bd', 'preprocess': 'bucketize_age', 'type': 'categorical'}
-2022-03-01 14:56:57,305 P30229 INFO Set feature index...
-2022-03-01 14:56:57,306 P30229 INFO Pickle feature_encode: ../data/KKBox/kkbox_x1_227d337d/feature_encoder.pkl
-2022-03-01 14:56:57,342 P30229 INFO Save feature_map to json: ../data/KKBox/kkbox_x1_227d337d/feature_map.json
-2022-03-01 14:56:57,343 P30229 INFO Set feature encoder done.
-2022-03-01 14:56:59,560 P30229 INFO Total number of parameters: 45157593.
-2022-03-01 14:56:59,560 P30229 INFO Loading data...
-2022-03-01 14:56:59,563 P30229 INFO Reading file: ../data/KKBox/KKBox_x1/train.csv
-2022-03-01 14:57:18,881 P30229 INFO Preprocess feature columns...
-2022-03-01 14:57:35,322 P30229 INFO Transform feature columns...
-2022-03-01 14:58:55,231 P30229 INFO Saving data to h5: ../data/KKBox/kkbox_x1_227d337d/train.h5
-2022-03-01 14:58:56,228 P30229 INFO Reading file: ../data/KKBox/KKBox_x1/valid.csv
-2022-03-01 14:58:58,606 P30229 INFO Preprocess feature columns...
-2022-03-01 14:59:00,582 P30229 INFO Transform feature columns...
-2022-03-01 14:59:10,020 P30229 INFO Saving data to h5: ../data/KKBox/kkbox_x1_227d337d/valid.h5
-2022-03-01 14:59:10,376 P30229 INFO Train samples: total/5901932, pos/2971724, neg/2930208, ratio/50.35%
-2022-03-01 14:59:10,396 P30229 INFO Validation samples: total/737743, pos/371466, neg/366277, ratio/50.35%
-2022-03-01 14:59:10,398 P30229 INFO Loading train data done.
-2022-03-01 14:59:12,917 P30229 INFO Start training: 591 batches/epoch
-2022-03-01 14:59:12,917 P30229 INFO ************ Epoch=1 start ************
-2022-03-01 15:02:25,409 P30229 INFO [Metrics] logloss: 0.554382 - AUC: 0.787493
-2022-03-01 15:02:25,409 P30229 INFO Save best model: monitor(max): 0.233111
-2022-03-01 15:02:25,577 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 15:02:25,646 P30229 INFO Train loss: 0.607611
-2022-03-01 15:02:25,646 P30229 INFO ************ Epoch=1 end ************
-2022-03-01 15:05:37,834 P30229 INFO [Metrics] logloss: 0.541952 - AUC: 0.799543
-2022-03-01 15:05:37,835 P30229 INFO Save best model: monitor(max): 0.257591
-2022-03-01 15:05:38,178 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 15:05:38,256 P30229 INFO Train loss: 0.589109
-2022-03-01 15:05:38,257 P30229 INFO ************ Epoch=2 end ************
-2022-03-01 15:08:50,506 P30229 INFO [Metrics] logloss: 0.533545 - AUC: 0.807026
-2022-03-01 15:08:50,506 P30229 INFO Save best model: monitor(max): 0.273481
-2022-03-01 15:08:50,825 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 15:08:50,912 P30229 INFO Train loss: 0.583545
-2022-03-01 15:08:50,912 P30229 INFO ************ Epoch=3 end ************
-2022-03-01 15:12:03,105 P30229 INFO [Metrics] logloss: 0.527800 - AUC: 0.811652
-2022-03-01 15:12:03,106 P30229 INFO Save best model: monitor(max): 0.283852
-2022-03-01 15:12:03,422 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 15:12:03,507 P30229 INFO Train loss: 0.578663
-2022-03-01 15:12:03,507 P30229 INFO ************ Epoch=4 end ************
-2022-03-01 15:15:15,642 P30229 INFO [Metrics] logloss: 0.523498 - AUC: 0.815761
-2022-03-01 15:15:15,643 P30229 INFO Save best model: monitor(max): 0.292264
-2022-03-01 15:15:15,969 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 15:15:16,051 P30229 INFO Train loss: 0.575274
-2022-03-01 15:15:16,051 P30229 INFO ************ Epoch=5 end ************
-2022-03-01 15:18:28,035 P30229 INFO [Metrics] logloss: 0.520412 - AUC: 0.818221
-2022-03-01 15:18:28,036 P30229 INFO Save best model: monitor(max): 0.297809
-2022-03-01 15:18:28,367 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 15:18:28,450 P30229 INFO Train loss: 0.572853
-2022-03-01 15:18:28,450 P30229 INFO ************ Epoch=6 end ************
-2022-03-01 15:21:40,373 P30229 INFO [Metrics] logloss: 0.517348 - AUC: 0.820593
-2022-03-01 15:21:40,374 P30229 INFO Save best model: monitor(max): 0.303245
-2022-03-01 15:21:40,695 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 15:21:40,757 P30229 INFO Train loss: 0.571217
-2022-03-01 15:21:40,757 P30229 INFO ************ Epoch=7 end ************
-2022-03-01 15:24:52,684 P30229 INFO [Metrics] logloss: 0.514184 - AUC: 0.823019
-2022-03-01 15:24:52,685 P30229 INFO Save best model: monitor(max): 0.308835
-2022-03-01 15:24:53,007 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 15:24:53,078 P30229 INFO Train loss: 0.569733
-2022-03-01 15:24:53,079 P30229 INFO ************ Epoch=8 end ************
-2022-03-01 15:28:04,925 P30229 INFO [Metrics] logloss: 0.512940 - AUC: 0.824105
-2022-03-01 15:28:04,926 P30229 INFO Save best model: monitor(max): 0.311165
-2022-03-01 15:28:05,248 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 15:28:05,316 P30229 INFO Train loss: 0.568352
-2022-03-01 15:28:05,316 P30229 INFO ************ Epoch=9 end ************
-2022-03-01 15:31:17,175 P30229 INFO [Metrics] logloss: 0.511948 - AUC: 0.825831
-2022-03-01 15:31:17,176 P30229 INFO Save best model: monitor(max): 0.313883
-2022-03-01 15:31:17,510 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 15:31:17,568 P30229 INFO Train loss: 0.567793
-2022-03-01 15:31:17,568 P30229 INFO ************ Epoch=10 end ************
-2022-03-01 15:34:29,440 P30229 INFO [Metrics] logloss: 0.509772 - AUC: 0.826508
-2022-03-01 15:34:29,441 P30229 INFO Save best model: monitor(max): 0.316736
-2022-03-01 15:34:29,768 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 15:34:29,834 P30229 INFO Train loss: 0.566824
-2022-03-01 15:34:29,834 P30229 INFO ************ Epoch=11 end ************
-2022-03-01 15:37:41,633 P30229 INFO [Metrics] logloss: 0.507963 - AUC: 0.828204
-2022-03-01 15:37:41,634 P30229 INFO Save best model: monitor(max): 0.320240
-2022-03-01 15:37:41,962 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 15:37:42,030 P30229 INFO Train loss: 0.566042
-2022-03-01 15:37:42,030 P30229 INFO ************ Epoch=12 end ************
-2022-03-01 15:40:53,707 P30229 INFO [Metrics] logloss: 0.507060 - AUC: 0.828721
-2022-03-01 15:40:53,708 P30229 INFO Save best model: monitor(max): 0.321662
-2022-03-01 15:40:54,042 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 15:40:54,109 P30229 INFO Train loss: 0.565228
-2022-03-01 15:40:54,109 P30229 INFO ************ Epoch=13 end ************
-2022-03-01 15:44:05,893 P30229 INFO [Metrics] logloss: 0.505816 - AUC: 0.829752
-2022-03-01 15:44:05,893 P30229 INFO Save best model: monitor(max): 0.323936
-2022-03-01 15:44:06,219 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 15:44:06,287 P30229 INFO Train loss: 0.564551
-2022-03-01 15:44:06,287 P30229 INFO ************ Epoch=14 end ************
-2022-03-01 15:47:18,108 P30229 INFO [Metrics] logloss: 0.504040 - AUC: 0.831068
-2022-03-01 15:47:18,109 P30229 INFO Save best model: monitor(max): 0.327028
-2022-03-01 15:47:18,443 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 15:47:18,505 P30229 INFO Train loss: 0.564035
-2022-03-01 15:47:18,505 P30229 INFO ************ Epoch=15 end ************
-2022-03-01 15:50:30,403 P30229 INFO [Metrics] logloss: 0.504139 - AUC: 0.831387
-2022-03-01 15:50:30,404 P30229 INFO Save best model: monitor(max): 0.327248
-2022-03-01 15:50:31,026 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 15:50:31,093 P30229 INFO Train loss: 0.563480
-2022-03-01 15:50:31,093 P30229 INFO ************ Epoch=16 end ************
-2022-03-01 15:53:42,788 P30229 INFO [Metrics] logloss: 0.502425 - AUC: 0.832447
-2022-03-01 15:53:42,788 P30229 INFO Save best model: monitor(max): 0.330022
-2022-03-01 15:53:43,133 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 15:53:43,196 P30229 INFO Train loss: 0.563114
-2022-03-01 15:53:43,196 P30229 INFO ************ Epoch=17 end ************
-2022-03-01 15:56:55,091 P30229 INFO [Metrics] logloss: 0.501691 - AUC: 0.832845
-2022-03-01 15:56:55,092 P30229 INFO Save best model: monitor(max): 0.331154
-2022-03-01 15:56:55,432 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 15:56:55,502 P30229 INFO Train loss: 0.562719
-2022-03-01 15:56:55,502 P30229 INFO ************ Epoch=18 end ************
-2022-03-01 16:00:07,992 P30229 INFO [Metrics] logloss: 0.501307 - AUC: 0.833250
-2022-03-01 16:00:07,992 P30229 INFO Save best model: monitor(max): 0.331943
-2022-03-01 16:00:08,357 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 16:00:08,422 P30229 INFO Train loss: 0.562152
-2022-03-01 16:00:08,424 P30229 INFO ************ Epoch=19 end ************
-2022-03-01 16:03:20,365 P30229 INFO [Metrics] logloss: 0.499900 - AUC: 0.834338
-2022-03-01 16:03:20,365 P30229 INFO Save best model: monitor(max): 0.334438
-2022-03-01 16:03:20,687 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 16:03:20,758 P30229 INFO Train loss: 0.561888
-2022-03-01 16:03:20,758 P30229 INFO ************ Epoch=20 end ************
-2022-03-01 16:06:32,429 P30229 INFO [Metrics] logloss: 0.499997 - AUC: 0.834470
-2022-03-01 16:06:32,430 P30229 INFO Save best model: monitor(max): 0.334473
-2022-03-01 16:06:32,760 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 16:06:32,821 P30229 INFO Train loss: 0.561406
-2022-03-01 16:06:32,821 P30229 INFO ************ Epoch=21 end ************
-2022-03-01 16:09:44,765 P30229 INFO [Metrics] logloss: 0.499577 - AUC: 0.834811
-2022-03-01 16:09:44,766 P30229 INFO Save best model: monitor(max): 0.335234
-2022-03-01 16:09:45,101 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 16:09:45,163 P30229 INFO Train loss: 0.561064
-2022-03-01 16:09:45,163 P30229 INFO ************ Epoch=22 end ************
-2022-03-01 16:12:56,915 P30229 INFO [Metrics] logloss: 0.497946 - AUC: 0.835879
-2022-03-01 16:12:56,916 P30229 INFO Save best model: monitor(max): 0.337933
-2022-03-01 16:12:57,248 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 16:12:57,306 P30229 INFO Train loss: 0.560738
-2022-03-01 16:12:57,306 P30229 INFO ************ Epoch=23 end ************
-2022-03-01 16:16:08,992 P30229 INFO [Metrics] logloss: 0.497622 - AUC: 0.836163
-2022-03-01 16:16:08,993 P30229 INFO Save best model: monitor(max): 0.338541
-2022-03-01 16:16:09,327 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 16:16:09,388 P30229 INFO Train loss: 0.560372
-2022-03-01 16:16:09,388 P30229 INFO ************ Epoch=24 end ************
-2022-03-01 16:19:21,153 P30229 INFO [Metrics] logloss: 0.497501 - AUC: 0.836384
-2022-03-01 16:19:21,154 P30229 INFO Save best model: monitor(max): 0.338883
-2022-03-01 16:19:21,485 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 16:19:21,557 P30229 INFO Train loss: 0.560137
-2022-03-01 16:19:21,557 P30229 INFO ************ Epoch=25 end ************
-2022-03-01 16:22:33,369 P30229 INFO [Metrics] logloss: 0.496910 - AUC: 0.836766
-2022-03-01 16:22:33,370 P30229 INFO Save best model: monitor(max): 0.339856
-2022-03-01 16:22:33,708 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 16:22:33,768 P30229 INFO Train loss: 0.559929
-2022-03-01 16:22:33,768 P30229 INFO ************ Epoch=26 end ************
-2022-03-01 16:25:45,485 P30229 INFO [Metrics] logloss: 0.496366 - AUC: 0.837202
-2022-03-01 16:25:45,486 P30229 INFO Save best model: monitor(max): 0.340836
-2022-03-01 16:25:45,820 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 16:25:45,891 P30229 INFO Train loss: 0.559461
-2022-03-01 16:25:45,891 P30229 INFO ************ Epoch=27 end ************
-2022-03-01 16:28:57,783 P30229 INFO [Metrics] logloss: 0.496371 - AUC: 0.837372
-2022-03-01 16:28:57,784 P30229 INFO Save best model: monitor(max): 0.341001
-2022-03-01 16:28:58,126 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 16:28:58,184 P30229 INFO Train loss: 0.559289
-2022-03-01 16:28:58,184 P30229 INFO ************ Epoch=28 end ************
-2022-03-01 16:32:09,943 P30229 INFO [Metrics] logloss: 0.495896 - AUC: 0.837699
-2022-03-01 16:32:09,944 P30229 INFO Save best model: monitor(max): 0.341804
-2022-03-01 16:32:10,285 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 16:32:10,357 P30229 INFO Train loss: 0.559001
-2022-03-01 16:32:10,357 P30229 INFO ************ Epoch=29 end ************
-2022-03-01 16:35:22,114 P30229 INFO [Metrics] logloss: 0.494787 - AUC: 0.838529
-2022-03-01 16:35:22,114 P30229 INFO Save best model: monitor(max): 0.343743
-2022-03-01 16:35:22,436 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 16:35:22,506 P30229 INFO Train loss: 0.558540
-2022-03-01 16:35:22,506 P30229 INFO ************ Epoch=30 end ************
-2022-03-01 16:38:34,327 P30229 INFO [Metrics] logloss: 0.494936 - AUC: 0.838524
-2022-03-01 16:38:34,328 P30229 INFO Monitor(max) STOP: 0.343588 !
-2022-03-01 16:38:34,328 P30229 INFO Reduce learning rate on plateau: 0.000100
-2022-03-01 16:38:34,328 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 16:38:34,396 P30229 INFO Train loss: 0.558250
-2022-03-01 16:38:34,396 P30229 INFO ************ Epoch=31 end ************
-2022-03-01 16:41:46,312 P30229 INFO [Metrics] logloss: 0.480445 - AUC: 0.849283
-2022-03-01 16:41:46,313 P30229 INFO Save best model: monitor(max): 0.368838
-2022-03-01 16:41:46,670 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 16:41:46,739 P30229 INFO Train loss: 0.500224
-2022-03-01 16:41:46,739 P30229 INFO ************ Epoch=32 end ************
-2022-03-01 16:44:58,517 P30229 INFO [Metrics] logloss: 0.477445 - AUC: 0.851704
-2022-03-01 16:44:58,517 P30229 INFO Save best model: monitor(max): 0.374258
-2022-03-01 16:44:58,875 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 16:44:58,946 P30229 INFO Train loss: 0.472289
-2022-03-01 16:44:58,946 P30229 INFO ************ Epoch=33 end ************
-2022-03-01 16:48:10,869 P30229 INFO [Metrics] logloss: 0.476608 - AUC: 0.852613
-2022-03-01 16:48:10,869 P30229 INFO Save best model: monitor(max): 0.376005
-2022-03-01 16:48:11,218 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 16:48:11,315 P30229 INFO Train loss: 0.461926
-2022-03-01 16:48:11,315 P30229 INFO ************ Epoch=34 end ************
-2022-03-01 16:51:23,103 P30229 INFO [Metrics] logloss: 0.476696 - AUC: 0.852906
-2022-03-01 16:51:23,104 P30229 INFO Save best model: monitor(max): 0.376210
-2022-03-01 16:51:23,443 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 16:51:23,513 P30229 INFO Train loss: 0.455410
-2022-03-01 16:51:23,513 P30229 INFO ************ Epoch=35 end ************
-2022-03-01 16:54:35,285 P30229 INFO [Metrics] logloss: 0.476966 - AUC: 0.852999
-2022-03-01 16:54:35,285 P30229 INFO Monitor(max) STOP: 0.376033 !
-2022-03-01 16:54:35,285 P30229 INFO Reduce learning rate on plateau: 0.000010
-2022-03-01 16:54:35,285 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 16:54:35,354 P30229 INFO Train loss: 0.450437
-2022-03-01 16:54:35,354 P30229 INFO ************ Epoch=36 end ************
-2022-03-01 16:57:47,179 P30229 INFO [Metrics] logloss: 0.481409 - AUC: 0.852600
-2022-03-01 16:57:47,179 P30229 INFO Monitor(max) STOP: 0.371191 !
-2022-03-01 16:57:47,179 P30229 INFO Reduce learning rate on plateau: 0.000001
-2022-03-01 16:57:47,179 P30229 INFO Early stopping at epoch=37
-2022-03-01 16:57:47,179 P30229 INFO --- 591/591 batches finished ---
-2022-03-01 16:57:47,248 P30229 INFO Train loss: 0.432247
-2022-03-01 16:57:47,248 P30229 INFO Training finished.
-2022-03-01 16:57:47,248 P30229 INFO Load best model: /home/XXX/FuxiCTR/benchmarks/KKBox/DCN_kkbox_x1/kkbox_x1_227d337d/DCN_kkbox_x1_005_362e6c13_model.ckpt
-2022-03-01 16:57:47,480 P30229 INFO ****** Validation evaluation ******
-2022-03-01 16:57:55,421 P30229 INFO [Metrics] logloss: 0.476696 - AUC: 0.852906
-2022-03-01 16:57:55,456 P30229 INFO ******** Test evaluation ********
-2022-03-01 16:57:55,456 P30229 INFO Loading data...
-2022-03-01 16:57:55,456 P30229 INFO Reading file: ../data/KKBox/KKBox_x1/test.csv
-2022-03-01 16:57:57,770 P30229 INFO Preprocess feature columns...
-2022-03-01 16:57:59,698 P30229 INFO Transform feature columns...
-2022-03-01 16:58:09,732 P30229 INFO Saving data to h5: ../data/KKBox/kkbox_x1_227d337d/test.h5
-2022-03-01 16:58:09,871 P30229 INFO Test samples: total/737743, pos/371466, neg/366277, ratio/50.35%
-2022-03-01 16:58:09,871 P30229 INFO Loading test data done.
-2022-03-01 16:58:17,807 P30229 INFO [Metrics] logloss: 0.476555 - AUC: 0.853114
-
-```
+## DCN_kkbox_x1
+
+A hands-on guide to run the DCN model on the KKBox_x1 dataset.
+
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
+
+### Index
+[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
+
+### Environments
++ Hardware
+
+ ```python
+ CPU: Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.60GHz
+ GPU: Tesla P100 16G
+ RAM: 755G
+
+ ```
+
++ Software
+
+ ```python
+ CUDA: 10.0
+ python: 3.6.5
+ pytorch: 1.0.1.post2
+ pandas: 0.23.0
+ numpy: 1.18.1
+ scipy: 1.1.0
+ sklearn: 0.23.1
+ pyyaml: 5.1
+ h5py: 2.7.1
+ tqdm: 4.59.0
+ fuxictr: 1.0.2
+ ```
+
+### Dataset
+Dataset ID: [KKBox_x1](https://github.com/reczoo/Datasets/tree/main/KKBox/KKBox_x1). Please refer to the dataset details to get data ready.
+
+### Code
+
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DCN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DCN.py).
+
+Running steps:
+
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to the system path
+
+ ```python
+ sys.path.append('YOUR_PATH_TO_FuxiCTR/')
+ ```
+
+2. Create a data directory and put the downloaded csv files in `../data/KKBox/KKBox_x1`.
+
+3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [DCN_kkbox_x1_tuner_config_05](./DCN_kkbox_x1_tuner_config_05). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
+
+4. Run the following script to start.
+
+ ```bash
+ cd DCN_kkbox_x1
+ nohup python run_expid.py --config ./DCN_kkbox_x1_tuner_config_05 --expid DCN_kkbox_x1_005_362e6c13 --gpu 0 > run.log &
+ tail -f run.log
+ ```
+
+### Results
+
+| logloss | AUC |
+|:--------------------:|:--------------------:|
+| 0.476555 | 0.853114 |
+
+
+### Logs
+```python
+2022-03-01 14:55:49,635 P30229 INFO {
+ "batch_norm": "False",
+ "batch_size": "10000",
+ "crossing_layers": "4",
+ "data_format": "csv",
+ "data_root": "../data/KKBox/",
+ "dataset_id": "kkbox_x1_227d337d",
+ "debug": "False",
+ "dnn_activations": "relu",
+ "dnn_hidden_units": "[5000, 5000]",
+ "embedding_dim": "128",
+ "embedding_dropout": "0",
+ "embedding_regularizer": "0.0005",
+ "epochs": "100",
+ "every_x_epochs": "1",
+ "feature_cols": "[{'active': True, 'dtype': 'str', 'name': ['msno', 'song_id', 'source_system_tab', 'source_screen_name', 'source_type', 'city', 'gender', 'registered_via', 'language'], 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'encoder': 'MaskedSumPooling', 'max_len': 3, 'name': 'genre_ids', 'type': 'sequence'}, {'active': True, 'dtype': 'str', 'encoder': 'MaskedSumPooling', 'max_len': 3, 'name': 'artist_name', 'type': 'sequence'}, {'active': True, 'dtype': 'str', 'name': 'isrc', 'preprocess': 'extract_country_code', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': 'bd', 'preprocess': 'bucketize_age', 'type': 'categorical'}]",
+ "gpu": "1",
+ "label_col": "{'dtype': 'float', 'name': 'label'}",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "metrics": "['logloss', 'AUC']",
+ "min_categr_count": "10",
+ "model": "DCN",
+ "model_id": "DCN_kkbox_x1_005_362e6c13",
+ "model_root": "./KKBox/DCN_kkbox_x1/",
+ "monitor": "{'AUC': 1, 'logloss': -1}",
+ "monitor_mode": "max",
+ "net_dropout": "0.4",
+ "net_regularizer": "0",
+ "optimizer": "adam",
+ "patience": "2",
+ "pickle_feature_encoder": "True",
+ "save_best_only": "True",
+ "seed": "2019",
+ "shuffle": "True",
+ "task": "binary_classification",
+ "test_data": "../data/KKBox/KKBox_x1/test.csv",
+ "train_data": "../data/KKBox/KKBox_x1/train.csv",
+ "use_hdf5": "True",
+ "valid_data": "../data/KKBox/KKBox_x1/valid.csv",
+ "verbose": "1",
+ "version": "pytorch",
+ "workers": "3"
+}
+2022-03-01 14:55:49,635 P30229 INFO Set up feature encoder...
+2022-03-01 14:55:49,635 P30229 INFO Reading file: ../data/KKBox/KKBox_x1/train.csv
+2022-03-01 14:56:09,480 P30229 INFO Preprocess feature columns...
+2022-03-01 14:56:26,044 P30229 INFO Fit feature encoder...
+2022-03-01 14:56:26,045 P30229 INFO Processing column: {'active': True, 'dtype': 'str', 'name': 'msno', 'type': 'categorical'}
+2022-03-01 14:56:27,463 P30229 INFO Processing column: {'active': True, 'dtype': 'str', 'name': 'song_id', 'type': 'categorical'}
+2022-03-01 14:56:29,260 P30229 INFO Processing column: {'active': True, 'dtype': 'str', 'name': 'source_system_tab', 'type': 'categorical'}
+2022-03-01 14:56:30,176 P30229 INFO Processing column: {'active': True, 'dtype': 'str', 'name': 'source_screen_name', 'type': 'categorical'}
+2022-03-01 14:56:31,068 P30229 INFO Processing column: {'active': True, 'dtype': 'str', 'name': 'source_type', 'type': 'categorical'}
+2022-03-01 14:56:31,987 P30229 INFO Processing column: {'active': True, 'dtype': 'str', 'name': 'city', 'type': 'categorical'}
+2022-03-01 14:56:32,831 P30229 INFO Processing column: {'active': True, 'dtype': 'str', 'name': 'gender', 'type': 'categorical'}
+2022-03-01 14:56:33,549 P30229 INFO Processing column: {'active': True, 'dtype': 'str', 'name': 'registered_via', 'type': 'categorical'}
+2022-03-01 14:56:34,357 P30229 INFO Processing column: {'active': True, 'dtype': 'str', 'name': 'language', 'type': 'categorical'}
+2022-03-01 14:56:35,218 P30229 INFO Processing column: {'active': True, 'dtype': 'str', 'encoder': 'MaskedSumPooling', 'max_len': 3, 'name': 'genre_ids', 'type': 'sequence'}
+2022-03-01 14:56:45,163 P30229 INFO Processing column: {'active': True, 'dtype': 'str', 'encoder': 'MaskedSumPooling', 'max_len': 3, 'name': 'artist_name', 'type': 'sequence'}
+2022-03-01 14:56:55,444 P30229 INFO Processing column: {'active': True, 'dtype': 'str', 'name': 'isrc', 'preprocess': 'extract_country_code', 'type': 'categorical'}
+2022-03-01 14:56:56,635 P30229 INFO Processing column: {'active': True, 'dtype': 'str', 'name': 'bd', 'preprocess': 'bucketize_age', 'type': 'categorical'}
+2022-03-01 14:56:57,305 P30229 INFO Set feature index...
+2022-03-01 14:56:57,306 P30229 INFO Pickle feature_encode: ../data/KKBox/kkbox_x1_227d337d/feature_encoder.pkl
+2022-03-01 14:56:57,342 P30229 INFO Save feature_map to json: ../data/KKBox/kkbox_x1_227d337d/feature_map.json
+2022-03-01 14:56:57,343 P30229 INFO Set feature encoder done.
+2022-03-01 14:56:59,560 P30229 INFO Total number of parameters: 45157593.
+2022-03-01 14:56:59,560 P30229 INFO Loading data...
+2022-03-01 14:56:59,563 P30229 INFO Reading file: ../data/KKBox/KKBox_x1/train.csv
+2022-03-01 14:57:18,881 P30229 INFO Preprocess feature columns...
+2022-03-01 14:57:35,322 P30229 INFO Transform feature columns...
+2022-03-01 14:58:55,231 P30229 INFO Saving data to h5: ../data/KKBox/kkbox_x1_227d337d/train.h5
+2022-03-01 14:58:56,228 P30229 INFO Reading file: ../data/KKBox/KKBox_x1/valid.csv
+2022-03-01 14:58:58,606 P30229 INFO Preprocess feature columns...
+2022-03-01 14:59:00,582 P30229 INFO Transform feature columns...
+2022-03-01 14:59:10,020 P30229 INFO Saving data to h5: ../data/KKBox/kkbox_x1_227d337d/valid.h5
+2022-03-01 14:59:10,376 P30229 INFO Train samples: total/5901932, pos/2971724, neg/2930208, ratio/50.35%
+2022-03-01 14:59:10,396 P30229 INFO Validation samples: total/737743, pos/371466, neg/366277, ratio/50.35%
+2022-03-01 14:59:10,398 P30229 INFO Loading train data done.
+2022-03-01 14:59:12,917 P30229 INFO Start training: 591 batches/epoch
+2022-03-01 14:59:12,917 P30229 INFO ************ Epoch=1 start ************
+2022-03-01 15:02:25,409 P30229 INFO [Metrics] logloss: 0.554382 - AUC: 0.787493
+2022-03-01 15:02:25,409 P30229 INFO Save best model: monitor(max): 0.233111
+2022-03-01 15:02:25,577 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 15:02:25,646 P30229 INFO Train loss: 0.607611
+2022-03-01 15:02:25,646 P30229 INFO ************ Epoch=1 end ************
+2022-03-01 15:05:37,834 P30229 INFO [Metrics] logloss: 0.541952 - AUC: 0.799543
+2022-03-01 15:05:37,835 P30229 INFO Save best model: monitor(max): 0.257591
+2022-03-01 15:05:38,178 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 15:05:38,256 P30229 INFO Train loss: 0.589109
+2022-03-01 15:05:38,257 P30229 INFO ************ Epoch=2 end ************
+2022-03-01 15:08:50,506 P30229 INFO [Metrics] logloss: 0.533545 - AUC: 0.807026
+2022-03-01 15:08:50,506 P30229 INFO Save best model: monitor(max): 0.273481
+2022-03-01 15:08:50,825 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 15:08:50,912 P30229 INFO Train loss: 0.583545
+2022-03-01 15:08:50,912 P30229 INFO ************ Epoch=3 end ************
+2022-03-01 15:12:03,105 P30229 INFO [Metrics] logloss: 0.527800 - AUC: 0.811652
+2022-03-01 15:12:03,106 P30229 INFO Save best model: monitor(max): 0.283852
+2022-03-01 15:12:03,422 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 15:12:03,507 P30229 INFO Train loss: 0.578663
+2022-03-01 15:12:03,507 P30229 INFO ************ Epoch=4 end ************
+2022-03-01 15:15:15,642 P30229 INFO [Metrics] logloss: 0.523498 - AUC: 0.815761
+2022-03-01 15:15:15,643 P30229 INFO Save best model: monitor(max): 0.292264
+2022-03-01 15:15:15,969 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 15:15:16,051 P30229 INFO Train loss: 0.575274
+2022-03-01 15:15:16,051 P30229 INFO ************ Epoch=5 end ************
+2022-03-01 15:18:28,035 P30229 INFO [Metrics] logloss: 0.520412 - AUC: 0.818221
+2022-03-01 15:18:28,036 P30229 INFO Save best model: monitor(max): 0.297809
+2022-03-01 15:18:28,367 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 15:18:28,450 P30229 INFO Train loss: 0.572853
+2022-03-01 15:18:28,450 P30229 INFO ************ Epoch=6 end ************
+2022-03-01 15:21:40,373 P30229 INFO [Metrics] logloss: 0.517348 - AUC: 0.820593
+2022-03-01 15:21:40,374 P30229 INFO Save best model: monitor(max): 0.303245
+2022-03-01 15:21:40,695 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 15:21:40,757 P30229 INFO Train loss: 0.571217
+2022-03-01 15:21:40,757 P30229 INFO ************ Epoch=7 end ************
+2022-03-01 15:24:52,684 P30229 INFO [Metrics] logloss: 0.514184 - AUC: 0.823019
+2022-03-01 15:24:52,685 P30229 INFO Save best model: monitor(max): 0.308835
+2022-03-01 15:24:53,007 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 15:24:53,078 P30229 INFO Train loss: 0.569733
+2022-03-01 15:24:53,079 P30229 INFO ************ Epoch=8 end ************
+2022-03-01 15:28:04,925 P30229 INFO [Metrics] logloss: 0.512940 - AUC: 0.824105
+2022-03-01 15:28:04,926 P30229 INFO Save best model: monitor(max): 0.311165
+2022-03-01 15:28:05,248 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 15:28:05,316 P30229 INFO Train loss: 0.568352
+2022-03-01 15:28:05,316 P30229 INFO ************ Epoch=9 end ************
+2022-03-01 15:31:17,175 P30229 INFO [Metrics] logloss: 0.511948 - AUC: 0.825831
+2022-03-01 15:31:17,176 P30229 INFO Save best model: monitor(max): 0.313883
+2022-03-01 15:31:17,510 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 15:31:17,568 P30229 INFO Train loss: 0.567793
+2022-03-01 15:31:17,568 P30229 INFO ************ Epoch=10 end ************
+2022-03-01 15:34:29,440 P30229 INFO [Metrics] logloss: 0.509772 - AUC: 0.826508
+2022-03-01 15:34:29,441 P30229 INFO Save best model: monitor(max): 0.316736
+2022-03-01 15:34:29,768 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 15:34:29,834 P30229 INFO Train loss: 0.566824
+2022-03-01 15:34:29,834 P30229 INFO ************ Epoch=11 end ************
+2022-03-01 15:37:41,633 P30229 INFO [Metrics] logloss: 0.507963 - AUC: 0.828204
+2022-03-01 15:37:41,634 P30229 INFO Save best model: monitor(max): 0.320240
+2022-03-01 15:37:41,962 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 15:37:42,030 P30229 INFO Train loss: 0.566042
+2022-03-01 15:37:42,030 P30229 INFO ************ Epoch=12 end ************
+2022-03-01 15:40:53,707 P30229 INFO [Metrics] logloss: 0.507060 - AUC: 0.828721
+2022-03-01 15:40:53,708 P30229 INFO Save best model: monitor(max): 0.321662
+2022-03-01 15:40:54,042 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 15:40:54,109 P30229 INFO Train loss: 0.565228
+2022-03-01 15:40:54,109 P30229 INFO ************ Epoch=13 end ************
+2022-03-01 15:44:05,893 P30229 INFO [Metrics] logloss: 0.505816 - AUC: 0.829752
+2022-03-01 15:44:05,893 P30229 INFO Save best model: monitor(max): 0.323936
+2022-03-01 15:44:06,219 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 15:44:06,287 P30229 INFO Train loss: 0.564551
+2022-03-01 15:44:06,287 P30229 INFO ************ Epoch=14 end ************
+2022-03-01 15:47:18,108 P30229 INFO [Metrics] logloss: 0.504040 - AUC: 0.831068
+2022-03-01 15:47:18,109 P30229 INFO Save best model: monitor(max): 0.327028
+2022-03-01 15:47:18,443 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 15:47:18,505 P30229 INFO Train loss: 0.564035
+2022-03-01 15:47:18,505 P30229 INFO ************ Epoch=15 end ************
+2022-03-01 15:50:30,403 P30229 INFO [Metrics] logloss: 0.504139 - AUC: 0.831387
+2022-03-01 15:50:30,404 P30229 INFO Save best model: monitor(max): 0.327248
+2022-03-01 15:50:31,026 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 15:50:31,093 P30229 INFO Train loss: 0.563480
+2022-03-01 15:50:31,093 P30229 INFO ************ Epoch=16 end ************
+2022-03-01 15:53:42,788 P30229 INFO [Metrics] logloss: 0.502425 - AUC: 0.832447
+2022-03-01 15:53:42,788 P30229 INFO Save best model: monitor(max): 0.330022
+2022-03-01 15:53:43,133 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 15:53:43,196 P30229 INFO Train loss: 0.563114
+2022-03-01 15:53:43,196 P30229 INFO ************ Epoch=17 end ************
+2022-03-01 15:56:55,091 P30229 INFO [Metrics] logloss: 0.501691 - AUC: 0.832845
+2022-03-01 15:56:55,092 P30229 INFO Save best model: monitor(max): 0.331154
+2022-03-01 15:56:55,432 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 15:56:55,502 P30229 INFO Train loss: 0.562719
+2022-03-01 15:56:55,502 P30229 INFO ************ Epoch=18 end ************
+2022-03-01 16:00:07,992 P30229 INFO [Metrics] logloss: 0.501307 - AUC: 0.833250
+2022-03-01 16:00:07,992 P30229 INFO Save best model: monitor(max): 0.331943
+2022-03-01 16:00:08,357 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 16:00:08,422 P30229 INFO Train loss: 0.562152
+2022-03-01 16:00:08,424 P30229 INFO ************ Epoch=19 end ************
+2022-03-01 16:03:20,365 P30229 INFO [Metrics] logloss: 0.499900 - AUC: 0.834338
+2022-03-01 16:03:20,365 P30229 INFO Save best model: monitor(max): 0.334438
+2022-03-01 16:03:20,687 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 16:03:20,758 P30229 INFO Train loss: 0.561888
+2022-03-01 16:03:20,758 P30229 INFO ************ Epoch=20 end ************
+2022-03-01 16:06:32,429 P30229 INFO [Metrics] logloss: 0.499997 - AUC: 0.834470
+2022-03-01 16:06:32,430 P30229 INFO Save best model: monitor(max): 0.334473
+2022-03-01 16:06:32,760 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 16:06:32,821 P30229 INFO Train loss: 0.561406
+2022-03-01 16:06:32,821 P30229 INFO ************ Epoch=21 end ************
+2022-03-01 16:09:44,765 P30229 INFO [Metrics] logloss: 0.499577 - AUC: 0.834811
+2022-03-01 16:09:44,766 P30229 INFO Save best model: monitor(max): 0.335234
+2022-03-01 16:09:45,101 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 16:09:45,163 P30229 INFO Train loss: 0.561064
+2022-03-01 16:09:45,163 P30229 INFO ************ Epoch=22 end ************
+2022-03-01 16:12:56,915 P30229 INFO [Metrics] logloss: 0.497946 - AUC: 0.835879
+2022-03-01 16:12:56,916 P30229 INFO Save best model: monitor(max): 0.337933
+2022-03-01 16:12:57,248 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 16:12:57,306 P30229 INFO Train loss: 0.560738
+2022-03-01 16:12:57,306 P30229 INFO ************ Epoch=23 end ************
+2022-03-01 16:16:08,992 P30229 INFO [Metrics] logloss: 0.497622 - AUC: 0.836163
+2022-03-01 16:16:08,993 P30229 INFO Save best model: monitor(max): 0.338541
+2022-03-01 16:16:09,327 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 16:16:09,388 P30229 INFO Train loss: 0.560372
+2022-03-01 16:16:09,388 P30229 INFO ************ Epoch=24 end ************
+2022-03-01 16:19:21,153 P30229 INFO [Metrics] logloss: 0.497501 - AUC: 0.836384
+2022-03-01 16:19:21,154 P30229 INFO Save best model: monitor(max): 0.338883
+2022-03-01 16:19:21,485 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 16:19:21,557 P30229 INFO Train loss: 0.560137
+2022-03-01 16:19:21,557 P30229 INFO ************ Epoch=25 end ************
+2022-03-01 16:22:33,369 P30229 INFO [Metrics] logloss: 0.496910 - AUC: 0.836766
+2022-03-01 16:22:33,370 P30229 INFO Save best model: monitor(max): 0.339856
+2022-03-01 16:22:33,708 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 16:22:33,768 P30229 INFO Train loss: 0.559929
+2022-03-01 16:22:33,768 P30229 INFO ************ Epoch=26 end ************
+2022-03-01 16:25:45,485 P30229 INFO [Metrics] logloss: 0.496366 - AUC: 0.837202
+2022-03-01 16:25:45,486 P30229 INFO Save best model: monitor(max): 0.340836
+2022-03-01 16:25:45,820 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 16:25:45,891 P30229 INFO Train loss: 0.559461
+2022-03-01 16:25:45,891 P30229 INFO ************ Epoch=27 end ************
+2022-03-01 16:28:57,783 P30229 INFO [Metrics] logloss: 0.496371 - AUC: 0.837372
+2022-03-01 16:28:57,784 P30229 INFO Save best model: monitor(max): 0.341001
+2022-03-01 16:28:58,126 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 16:28:58,184 P30229 INFO Train loss: 0.559289
+2022-03-01 16:28:58,184 P30229 INFO ************ Epoch=28 end ************
+2022-03-01 16:32:09,943 P30229 INFO [Metrics] logloss: 0.495896 - AUC: 0.837699
+2022-03-01 16:32:09,944 P30229 INFO Save best model: monitor(max): 0.341804
+2022-03-01 16:32:10,285 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 16:32:10,357 P30229 INFO Train loss: 0.559001
+2022-03-01 16:32:10,357 P30229 INFO ************ Epoch=29 end ************
+2022-03-01 16:35:22,114 P30229 INFO [Metrics] logloss: 0.494787 - AUC: 0.838529
+2022-03-01 16:35:22,114 P30229 INFO Save best model: monitor(max): 0.343743
+2022-03-01 16:35:22,436 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 16:35:22,506 P30229 INFO Train loss: 0.558540
+2022-03-01 16:35:22,506 P30229 INFO ************ Epoch=30 end ************
+2022-03-01 16:38:34,327 P30229 INFO [Metrics] logloss: 0.494936 - AUC: 0.838524
+2022-03-01 16:38:34,328 P30229 INFO Monitor(max) STOP: 0.343588 !
+2022-03-01 16:38:34,328 P30229 INFO Reduce learning rate on plateau: 0.000100
+2022-03-01 16:38:34,328 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 16:38:34,396 P30229 INFO Train loss: 0.558250
+2022-03-01 16:38:34,396 P30229 INFO ************ Epoch=31 end ************
+2022-03-01 16:41:46,312 P30229 INFO [Metrics] logloss: 0.480445 - AUC: 0.849283
+2022-03-01 16:41:46,313 P30229 INFO Save best model: monitor(max): 0.368838
+2022-03-01 16:41:46,670 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 16:41:46,739 P30229 INFO Train loss: 0.500224
+2022-03-01 16:41:46,739 P30229 INFO ************ Epoch=32 end ************
+2022-03-01 16:44:58,517 P30229 INFO [Metrics] logloss: 0.477445 - AUC: 0.851704
+2022-03-01 16:44:58,517 P30229 INFO Save best model: monitor(max): 0.374258
+2022-03-01 16:44:58,875 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 16:44:58,946 P30229 INFO Train loss: 0.472289
+2022-03-01 16:44:58,946 P30229 INFO ************ Epoch=33 end ************
+2022-03-01 16:48:10,869 P30229 INFO [Metrics] logloss: 0.476608 - AUC: 0.852613
+2022-03-01 16:48:10,869 P30229 INFO Save best model: monitor(max): 0.376005
+2022-03-01 16:48:11,218 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 16:48:11,315 P30229 INFO Train loss: 0.461926
+2022-03-01 16:48:11,315 P30229 INFO ************ Epoch=34 end ************
+2022-03-01 16:51:23,103 P30229 INFO [Metrics] logloss: 0.476696 - AUC: 0.852906
+2022-03-01 16:51:23,104 P30229 INFO Save best model: monitor(max): 0.376210
+2022-03-01 16:51:23,443 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 16:51:23,513 P30229 INFO Train loss: 0.455410
+2022-03-01 16:51:23,513 P30229 INFO ************ Epoch=35 end ************
+2022-03-01 16:54:35,285 P30229 INFO [Metrics] logloss: 0.476966 - AUC: 0.852999
+2022-03-01 16:54:35,285 P30229 INFO Monitor(max) STOP: 0.376033 !
+2022-03-01 16:54:35,285 P30229 INFO Reduce learning rate on plateau: 0.000010
+2022-03-01 16:54:35,285 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 16:54:35,354 P30229 INFO Train loss: 0.450437
+2022-03-01 16:54:35,354 P30229 INFO ************ Epoch=36 end ************
+2022-03-01 16:57:47,179 P30229 INFO [Metrics] logloss: 0.481409 - AUC: 0.852600
+2022-03-01 16:57:47,179 P30229 INFO Monitor(max) STOP: 0.371191 !
+2022-03-01 16:57:47,179 P30229 INFO Reduce learning rate on plateau: 0.000001
+2022-03-01 16:57:47,179 P30229 INFO Early stopping at epoch=37
+2022-03-01 16:57:47,179 P30229 INFO --- 591/591 batches finished ---
+2022-03-01 16:57:47,248 P30229 INFO Train loss: 0.432247
+2022-03-01 16:57:47,248 P30229 INFO Training finished.
+2022-03-01 16:57:47,248 P30229 INFO Load best model: /home/XXX/FuxiCTR/benchmarks/KKBox/DCN_kkbox_x1/kkbox_x1_227d337d/DCN_kkbox_x1_005_362e6c13_model.ckpt
+2022-03-01 16:57:47,480 P30229 INFO ****** Validation evaluation ******
+2022-03-01 16:57:55,421 P30229 INFO [Metrics] logloss: 0.476696 - AUC: 0.852906
+2022-03-01 16:57:55,456 P30229 INFO ******** Test evaluation ********
+2022-03-01 16:57:55,456 P30229 INFO Loading data...
+2022-03-01 16:57:55,456 P30229 INFO Reading file: ../data/KKBox/KKBox_x1/test.csv
+2022-03-01 16:57:57,770 P30229 INFO Preprocess feature columns...
+2022-03-01 16:57:59,698 P30229 INFO Transform feature columns...
+2022-03-01 16:58:09,732 P30229 INFO Saving data to h5: ../data/KKBox/kkbox_x1_227d337d/test.h5
+2022-03-01 16:58:09,871 P30229 INFO Test samples: total/737743, pos/371466, neg/366277, ratio/50.35%
+2022-03-01 16:58:09,871 P30229 INFO Loading test data done.
+2022-03-01 16:58:17,807 P30229 INFO [Metrics] logloss: 0.476555 - AUC: 0.853114
+
+```
diff --git a/ranking/ctr/DCN/DCN_kuaivideo_x1/README.md b/ranking/ctr/DCN/DCN_kuaivideo_x1/README.md
index 08b61626..bc09b293 100644
--- a/ranking/ctr/DCN/DCN_kuaivideo_x1/README.md
+++ b/ranking/ctr/DCN/DCN_kuaivideo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DCN model on the KuaiVideo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [KuaiVideo_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/KuaiShou#KuaiVideo_x1) to get data ready.
+Please refer to [KuaiVideo_x1](https://github.com/reczoo/Datasets/tree/main/KuaiShou/KuaiVideo_x1) to get the dataset details.
### Code
-We use the [DCN](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/DCN) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [DCN](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/DCN) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/DCN/DCN_microvideo1.7m_x1/README.md b/ranking/ctr/DCN/DCN_microvideo1.7m_x1/README.md
index a882cbac..5ef6c6bc 100644
--- a/ranking/ctr/DCN/DCN_microvideo1.7m_x1/README.md
+++ b/ranking/ctr/DCN/DCN_microvideo1.7m_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DCN model on the MicroVideo1.7M_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [MicroVideo1.7M_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/MicroVideo1.7M#MicroVideo17M_x1) to get data ready.
+Please refer to [MicroVideo1.7M_x1](https://github.com/reczoo/Datasets/tree/main/MicroVideo/MicroVideo1.7M_x1) to get the dataset details.
### Code
-We use the [DCN](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/DCN) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [DCN](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/DCN) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/DCN/DCN_movielenslatest_x1/README.md b/ranking/ctr/DCN/DCN_movielenslatest_x1/README.md
index d8ce3e56..f3402788 100644
--- a/ranking/ctr/DCN/DCN_movielenslatest_x1/README.md
+++ b/ranking/ctr/DCN/DCN_movielenslatest_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DCN model on the Movielenslatest_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
@@ -36,15 +36,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
### Dataset
-Dataset ID: [Movielenslatest_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/MovieLens/README.md#Movielenslatest_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [MovielensLatest_x1](https://github.com/reczoo/Datasets/tree/main/MovieLens/MovielensLatest_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DCN](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DCN.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DCN](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DCN.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DCN/DCN_taobaoad_x1/README.md b/ranking/ctr/DCN/DCN_taobaoad_x1/README.md
index 8602decf..f3301748 100644
--- a/ranking/ctr/DCN/DCN_taobaoad_x1/README.md
+++ b/ranking/ctr/DCN/DCN_taobaoad_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DCN model on the TaobaoAd_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [TaobaoAd_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/Taobao#TaobaoAd_x1) to get data ready.
+Please refer to [TaobaoAd_x1](https://github.com/reczoo/Datasets/tree/main/Taobao/TaobaoAd_x1) to get the dataset details.
### Code
-We use the [DCN](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/DCN) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [DCN](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/DCN) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/DCNv2/CrossNetv2_avazu_x1/README.md b/ranking/ctr/DCNv2/CrossNetv2_avazu_x1/README.md
index f2f0acea..a7ef6393 100644
--- a/ranking/ctr/DCNv2/CrossNetv2_avazu_x1/README.md
+++ b/ranking/ctr/DCNv2/CrossNetv2_avazu_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DCNv2 model on the Avazu_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x1](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DCNv2](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DCNv2.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DCNv2](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DCNv2.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DCNv2/CrossNetv2_criteo_x1/README.md b/ranking/ctr/DCNv2/CrossNetv2_criteo_x1/README.md
index 4049e075..993424e1 100644
--- a/ranking/ctr/DCNv2/CrossNetv2_criteo_x1/README.md
+++ b/ranking/ctr/DCNv2/CrossNetv2_criteo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DCNv2 model on the Criteo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x1](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DCNv2](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DCNv2.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DCNv2](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DCNv2.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DCNv2/CrossNetv2_frappe_x1/README.md b/ranking/ctr/DCNv2/CrossNetv2_frappe_x1/README.md
index 34e2038a..c433d079 100644
--- a/ranking/ctr/DCNv2/CrossNetv2_frappe_x1/README.md
+++ b/ranking/ctr/DCNv2/CrossNetv2_frappe_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DCNv2 model on the Frappe_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -38,11 +38,11 @@ Dataset ID: [Frappe_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pr
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DCNv2](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DCNv2.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DCNv2](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DCNv2.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DCNv2/CrossNetv2_movielenslatest_x1/README.md b/ranking/ctr/DCNv2/CrossNetv2_movielenslatest_x1/README.md
index def8aa82..f9a0d57d 100644
--- a/ranking/ctr/DCNv2/CrossNetv2_movielenslatest_x1/README.md
+++ b/ranking/ctr/DCNv2/CrossNetv2_movielenslatest_x1/README.md
@@ -1,296 +1,296 @@
-## CrossNetv2_movielenslatest_x1
-
-A hands-on guide to run the DCNv2 model on the MovielensLatest_x1 dataset.
-
-Author: [XUEPAI](https://github.com/xue-pai)
-
-### Index
-[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
-
-### Environments
-+ Hardware
-
- ```python
- CPU: Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.6GHz
- GPU: Tesla P100 16G
- RAM: 755G
-
- ```
-
-+ Software
-
- ```python
- CUDA: 11.4
- python: 3.6.5
- pytorch: 1.0.1.post2
- pandas: 0.23.0
- numpy: 1.18.1
- scipy: 1.1.0
- sklearn: 0.23.1
- pyyaml: 5.1
- h5py: 2.7.1
- tqdm: 4.59.0
- fuxictr: 1.1.0
- ```
-
-### Dataset
-Dataset ID: [MovielensLatest_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/MovieLens#MovielensLatest_x1). Please refer to the dataset details to get data ready.
-
-### Code
-
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DCNv2](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DCNv2.py).
-
-Running steps:
-
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
-
- ```python
- sys.path.append('YOUR_PATH_TO_FuxiCTR/')
- ```
-
-2. Create a data directory and put the downloaded csv files in `../data/Movielens/MovielensLatest_x1`.
-
-3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [CrossNetv2_movielenslatest_x1_tuner_config_01](./CrossNetv2_movielenslatest_x1_tuner_config_01). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
-
-4. Run the following script to start.
-
- ```bash
- cd CrossNetv2_movielenslatest_x1
- nohup python run_expid.py --config ./CrossNetv2_movielenslatest_x1_tuner_config_01 --expid DCNv2_movielenslatest_x1_005_a53d8bd5 --gpu 0 > run.log &
- tail -f run.log
- ```
-
-### Results
-
-| AUC | logloss |
-|:--------------------:|:--------------------:|
-| 0.960600 | 0.257848 |
-
-
-### Logs
-```python
-2022-01-23 13:32:02,119 P14331 INFO {
- "batch_norm": "False",
- "batch_size": "4096",
- "data_format": "csv",
- "data_root": "../data/Movielens/",
- "dataset_id": "movielenslatest_x1_cd32d937",
- "debug": "False",
- "dnn_activations": "relu",
- "embedding_dim": "10",
- "embedding_regularizer": "0.001",
- "epochs": "100",
- "every_x_epochs": "1",
- "feature_cols": "[{'active': True, 'dtype': 'float', 'name': ['user_id', 'item_id', 'tag_id'], 'type': 'categorical'}]",
- "gpu": "0",
- "label_col": "{'dtype': 'float', 'name': 'label'}",
- "learning_rate": "0.001",
- "loss": "binary_crossentropy",
- "low_rank": "32",
- "metrics": "['AUC', 'logloss']",
- "min_categr_count": "1",
- "model": "DCNv2",
- "model_id": "DCNv2_movielenslatest_x1_005_a53d8bd5",
- "model_root": "./Frappe/DCNv2_movielenslatest_x1/",
- "model_structure": "crossnet_only",
- "monitor": "AUC",
- "monitor_mode": "max",
- "net_dropout": "0",
- "net_regularizer": "0",
- "num_cross_layers": "8",
- "num_experts": "4",
- "num_workers": "3",
- "optimizer": "adam",
- "parallel_dnn_hidden_units": "[500, 500, 500]",
- "partition_block_size": "-1",
- "patience": "2",
- "pickle_feature_encoder": "True",
- "save_best_only": "True",
- "seed": "2021",
- "shuffle": "True",
- "stacked_dnn_hidden_units": "[500, 500, 500]",
- "task": "binary_classification",
- "test_data": "../data/Movielens/MovielensLatest_x1/test.csv",
- "train_data": "../data/Movielens/MovielensLatest_x1/train.csv",
- "use_hdf5": "True",
- "use_low_rank_mixture": "False",
- "valid_data": "../data/Movielens/MovielensLatest_x1/valid.csv",
- "verbose": "1",
- "version": "pytorch"
-}
-2022-01-23 13:32:02,120 P14331 INFO Set up feature encoder...
-2022-01-23 13:32:02,120 P14331 INFO Load feature_map from json: ../data/Movielens/movielenslatest_x1_cd32d937/feature_map.json
-2022-01-23 13:32:02,121 P14331 INFO Loading data...
-2022-01-23 13:32:02,123 P14331 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_cd32d937/train.h5
-2022-01-23 13:32:02,152 P14331 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_cd32d937/valid.h5
-2022-01-23 13:32:02,160 P14331 INFO Train samples: total/1404801, pos/467878, neg/936923, ratio/33.31%, blocks/1
-2022-01-23 13:32:02,160 P14331 INFO Validation samples: total/401372, pos/134225, neg/267147, ratio/33.44%, blocks/1
-2022-01-23 13:32:02,160 P14331 INFO Loading train data done.
-2022-01-23 13:32:07,211 P14331 INFO Total number of parameters: 909861.
-2022-01-23 13:32:07,212 P14331 INFO Start training: 343 batches/epoch
-2022-01-23 13:32:07,212 P14331 INFO ************ Epoch=1 start ************
-2022-01-23 13:32:31,195 P14331 INFO [Metrics] AUC: 0.927115 - logloss: 0.307313
-2022-01-23 13:32:31,195 P14331 INFO Save best model: monitor(max): 0.927115
-2022-01-23 13:32:31,200 P14331 INFO --- 343/343 batches finished ---
-2022-01-23 13:32:31,253 P14331 INFO Train loss: 0.468740
-2022-01-23 13:32:31,253 P14331 INFO ************ Epoch=1 end ************
-2022-01-23 13:32:39,231 P14331 INFO [Metrics] AUC: 0.932153 - logloss: 0.299090
-2022-01-23 13:32:39,232 P14331 INFO Save best model: monitor(max): 0.932153
-2022-01-23 13:32:39,237 P14331 INFO --- 343/343 batches finished ---
-2022-01-23 13:32:39,294 P14331 INFO Train loss: 0.353250
-2022-01-23 13:32:39,294 P14331 INFO ************ Epoch=2 end ************
-2022-01-23 13:32:47,195 P14331 INFO [Metrics] AUC: 0.933659 - logloss: 0.296147
-2022-01-23 13:32:47,196 P14331 INFO Save best model: monitor(max): 0.933659
-2022-01-23 13:32:47,201 P14331 INFO --- 343/343 batches finished ---
-2022-01-23 13:32:47,257 P14331 INFO Train loss: 0.338897
-2022-01-23 13:32:47,257 P14331 INFO ************ Epoch=3 end ************
-2022-01-23 13:33:15,034 P14331 INFO [Metrics] AUC: 0.934972 - logloss: 0.293528
-2022-01-23 13:33:15,035 P14331 INFO Save best model: monitor(max): 0.934972
-2022-01-23 13:33:15,040 P14331 INFO --- 343/343 batches finished ---
-2022-01-23 13:33:15,093 P14331 INFO Train loss: 0.331366
-2022-01-23 13:33:15,093 P14331 INFO ************ Epoch=4 end ************
-2022-01-23 13:33:58,454 P14331 INFO [Metrics] AUC: 0.935891 - logloss: 0.291285
-2022-01-23 13:33:58,454 P14331 INFO Save best model: monitor(max): 0.935891
-2022-01-23 13:33:58,460 P14331 INFO --- 343/343 batches finished ---
-2022-01-23 13:33:58,519 P14331 INFO Train loss: 0.325854
-2022-01-23 13:33:58,519 P14331 INFO ************ Epoch=5 end ************
-2022-01-23 13:34:42,534 P14331 INFO [Metrics] AUC: 0.938383 - logloss: 0.285677
-2022-01-23 13:34:42,535 P14331 INFO Save best model: monitor(max): 0.938383
-2022-01-23 13:34:42,541 P14331 INFO --- 343/343 batches finished ---
-2022-01-23 13:34:42,593 P14331 INFO Train loss: 0.320053
-2022-01-23 13:34:42,593 P14331 INFO ************ Epoch=6 end ************
-2022-01-23 13:35:26,158 P14331 INFO [Metrics] AUC: 0.941837 - logloss: 0.277530
-2022-01-23 13:35:26,158 P14331 INFO Save best model: monitor(max): 0.941837
-2022-01-23 13:35:26,164 P14331 INFO --- 343/343 batches finished ---
-2022-01-23 13:35:26,212 P14331 INFO Train loss: 0.311736
-2022-01-23 13:35:26,212 P14331 INFO ************ Epoch=7 end ************
-2022-01-23 13:36:10,017 P14331 INFO [Metrics] AUC: 0.944402 - logloss: 0.271048
-2022-01-23 13:36:10,017 P14331 INFO Save best model: monitor(max): 0.944402
-2022-01-23 13:36:10,023 P14331 INFO --- 343/343 batches finished ---
-2022-01-23 13:36:10,073 P14331 INFO Train loss: 0.304064
-2022-01-23 13:36:10,073 P14331 INFO ************ Epoch=8 end ************
-2022-01-23 13:36:53,642 P14331 INFO [Metrics] AUC: 0.947380 - logloss: 0.264100
-2022-01-23 13:36:53,642 P14331 INFO Save best model: monitor(max): 0.947380
-2022-01-23 13:36:53,648 P14331 INFO --- 343/343 batches finished ---
-2022-01-23 13:36:53,695 P14331 INFO Train loss: 0.296497
-2022-01-23 13:36:53,696 P14331 INFO ************ Epoch=9 end ************
-2022-01-23 13:37:37,226 P14331 INFO [Metrics] AUC: 0.949104 - logloss: 0.260154
-2022-01-23 13:37:37,226 P14331 INFO Save best model: monitor(max): 0.949104
-2022-01-23 13:37:37,232 P14331 INFO --- 343/343 batches finished ---
-2022-01-23 13:37:37,279 P14331 INFO Train loss: 0.286990
-2022-01-23 13:37:37,279 P14331 INFO ************ Epoch=10 end ************
-2022-01-23 13:38:21,087 P14331 INFO [Metrics] AUC: 0.950721 - logloss: 0.256409
-2022-01-23 13:38:21,088 P14331 INFO Save best model: monitor(max): 0.950721
-2022-01-23 13:38:21,094 P14331 INFO --- 343/343 batches finished ---
-2022-01-23 13:38:21,143 P14331 INFO Train loss: 0.279196
-2022-01-23 13:38:21,143 P14331 INFO ************ Epoch=11 end ************
-2022-01-23 13:39:05,407 P14331 INFO [Metrics] AUC: 0.951915 - logloss: 0.253872
-2022-01-23 13:39:05,407 P14331 INFO Save best model: monitor(max): 0.951915
-2022-01-23 13:39:05,413 P14331 INFO --- 343/343 batches finished ---
-2022-01-23 13:39:05,492 P14331 INFO Train loss: 0.272639
-2022-01-23 13:39:05,493 P14331 INFO ************ Epoch=12 end ************
-2022-01-23 13:39:49,580 P14331 INFO [Metrics] AUC: 0.952646 - logloss: 0.252227
-2022-01-23 13:39:49,581 P14331 INFO Save best model: monitor(max): 0.952646
-2022-01-23 13:39:49,586 P14331 INFO --- 343/343 batches finished ---
-2022-01-23 13:39:49,638 P14331 INFO Train loss: 0.267295
-2022-01-23 13:39:49,638 P14331 INFO ************ Epoch=13 end ************
-2022-01-23 13:40:33,519 P14331 INFO [Metrics] AUC: 0.953356 - logloss: 0.250443
-2022-01-23 13:40:33,519 P14331 INFO Save best model: monitor(max): 0.953356
-2022-01-23 13:40:33,525 P14331 INFO --- 343/343 batches finished ---
-2022-01-23 13:40:33,571 P14331 INFO Train loss: 0.262843
-2022-01-23 13:40:33,571 P14331 INFO ************ Epoch=14 end ************
-2022-01-23 13:41:17,246 P14331 INFO [Metrics] AUC: 0.953862 - logloss: 0.249957
-2022-01-23 13:41:17,246 P14331 INFO Save best model: monitor(max): 0.953862
-2022-01-23 13:41:17,252 P14331 INFO --- 343/343 batches finished ---
-2022-01-23 13:41:17,299 P14331 INFO Train loss: 0.258983
-2022-01-23 13:41:17,299 P14331 INFO ************ Epoch=15 end ************
-2022-01-23 13:42:00,881 P14331 INFO [Metrics] AUC: 0.954835 - logloss: 0.247557
-2022-01-23 13:42:00,881 P14331 INFO Save best model: monitor(max): 0.954835
-2022-01-23 13:42:00,887 P14331 INFO --- 343/343 batches finished ---
-2022-01-23 13:42:00,934 P14331 INFO Train loss: 0.255221
-2022-01-23 13:42:00,934 P14331 INFO ************ Epoch=16 end ************
-2022-01-23 13:42:44,366 P14331 INFO [Metrics] AUC: 0.955381 - logloss: 0.246348
-2022-01-23 13:42:44,366 P14331 INFO Save best model: monitor(max): 0.955381
-2022-01-23 13:42:44,372 P14331 INFO --- 343/343 batches finished ---
-2022-01-23 13:42:44,418 P14331 INFO Train loss: 0.251695
-2022-01-23 13:42:44,418 P14331 INFO ************ Epoch=17 end ************
-2022-01-23 13:43:27,947 P14331 INFO [Metrics] AUC: 0.955865 - logloss: 0.245874
-2022-01-23 13:43:27,947 P14331 INFO Save best model: monitor(max): 0.955865
-2022-01-23 13:43:27,953 P14331 INFO --- 343/343 batches finished ---
-2022-01-23 13:43:27,999 P14331 INFO Train loss: 0.247962
-2022-01-23 13:43:27,999 P14331 INFO ************ Epoch=18 end ************
-2022-01-23 13:44:12,125 P14331 INFO [Metrics] AUC: 0.956488 - logloss: 0.245183
-2022-01-23 13:44:12,126 P14331 INFO Save best model: monitor(max): 0.956488
-2022-01-23 13:44:12,132 P14331 INFO --- 343/343 batches finished ---
-2022-01-23 13:44:12,182 P14331 INFO Train loss: 0.243939
-2022-01-23 13:44:12,182 P14331 INFO ************ Epoch=19 end ************
-2022-01-23 13:44:55,588 P14331 INFO [Metrics] AUC: 0.957065 - logloss: 0.243498
-2022-01-23 13:44:55,588 P14331 INFO Save best model: monitor(max): 0.957065
-2022-01-23 13:44:55,594 P14331 INFO --- 343/343 batches finished ---
-2022-01-23 13:44:55,642 P14331 INFO Train loss: 0.240151
-2022-01-23 13:44:55,642 P14331 INFO ************ Epoch=20 end ************
-2022-01-23 13:45:39,335 P14331 INFO [Metrics] AUC: 0.957455 - logloss: 0.244029
-2022-01-23 13:45:39,335 P14331 INFO Save best model: monitor(max): 0.957455
-2022-01-23 13:45:39,341 P14331 INFO --- 343/343 batches finished ---
-2022-01-23 13:45:39,390 P14331 INFO Train loss: 0.236745
-2022-01-23 13:45:39,390 P14331 INFO ************ Epoch=21 end ************
-2022-01-23 13:46:23,262 P14331 INFO [Metrics] AUC: 0.957577 - logloss: 0.244996
-2022-01-23 13:46:23,263 P14331 INFO Save best model: monitor(max): 0.957577
-2022-01-23 13:46:23,269 P14331 INFO --- 343/343 batches finished ---
-2022-01-23 13:46:23,318 P14331 INFO Train loss: 0.233452
-2022-01-23 13:46:23,318 P14331 INFO ************ Epoch=22 end ************
-2022-01-23 13:47:07,423 P14331 INFO [Metrics] AUC: 0.957621 - logloss: 0.245759
-2022-01-23 13:47:07,423 P14331 INFO Save best model: monitor(max): 0.957621
-2022-01-23 13:47:07,430 P14331 INFO --- 343/343 batches finished ---
-2022-01-23 13:47:07,476 P14331 INFO Train loss: 0.230427
-2022-01-23 13:47:07,478 P14331 INFO ************ Epoch=23 end ************
-2022-01-23 13:47:50,921 P14331 INFO [Metrics] AUC: 0.958001 - logloss: 0.246918
-2022-01-23 13:47:50,922 P14331 INFO Save best model: monitor(max): 0.958001
-2022-01-23 13:47:50,929 P14331 INFO --- 343/343 batches finished ---
-2022-01-23 13:47:50,979 P14331 INFO Train loss: 0.227437
-2022-01-23 13:47:50,979 P14331 INFO ************ Epoch=24 end ************
-2022-01-23 13:48:33,936 P14331 INFO [Metrics] AUC: 0.957814 - logloss: 0.248785
-2022-01-23 13:48:33,937 P14331 INFO Monitor(max) STOP: 0.957814 !
-2022-01-23 13:48:33,937 P14331 INFO Reduce learning rate on plateau: 0.000100
-2022-01-23 13:48:33,937 P14331 INFO --- 343/343 batches finished ---
-2022-01-23 13:48:33,984 P14331 INFO Train loss: 0.224838
-2022-01-23 13:48:33,985 P14331 INFO ************ Epoch=25 end ************
-2022-01-23 13:49:17,302 P14331 INFO [Metrics] AUC: 0.960105 - logloss: 0.247866
-2022-01-23 13:49:17,302 P14331 INFO Save best model: monitor(max): 0.960105
-2022-01-23 13:49:17,308 P14331 INFO --- 343/343 batches finished ---
-2022-01-23 13:49:17,368 P14331 INFO Train loss: 0.182522
-2022-01-23 13:49:17,368 P14331 INFO ************ Epoch=26 end ************
-2022-01-23 13:50:00,952 P14331 INFO [Metrics] AUC: 0.960734 - logloss: 0.251583
-2022-01-23 13:50:00,952 P14331 INFO Save best model: monitor(max): 0.960734
-2022-01-23 13:50:00,958 P14331 INFO --- 343/343 batches finished ---
-2022-01-23 13:50:01,040 P14331 INFO Train loss: 0.162325
-2022-01-23 13:50:01,040 P14331 INFO ************ Epoch=27 end ************
-2022-01-23 13:50:44,974 P14331 INFO [Metrics] AUC: 0.960746 - logloss: 0.257197
-2022-01-23 13:50:44,974 P14331 INFO Save best model: monitor(max): 0.960746
-2022-01-23 13:50:44,980 P14331 INFO --- 343/343 batches finished ---
-2022-01-23 13:50:45,026 P14331 INFO Train loss: 0.150766
-2022-01-23 13:50:45,026 P14331 INFO ************ Epoch=28 end ************
-2022-01-23 13:51:29,111 P14331 INFO [Metrics] AUC: 0.960468 - logloss: 0.263855
-2022-01-23 13:51:29,111 P14331 INFO Monitor(max) STOP: 0.960468 !
-2022-01-23 13:51:29,111 P14331 INFO Reduce learning rate on plateau: 0.000010
-2022-01-23 13:51:29,111 P14331 INFO --- 343/343 batches finished ---
-2022-01-23 13:51:29,163 P14331 INFO Train loss: 0.142814
-2022-01-23 13:51:29,163 P14331 INFO ************ Epoch=29 end ************
-2022-01-23 13:52:13,209 P14331 INFO [Metrics] AUC: 0.960383 - logloss: 0.265112
-2022-01-23 13:52:13,210 P14331 INFO Monitor(max) STOP: 0.960383 !
-2022-01-23 13:52:13,210 P14331 INFO Reduce learning rate on plateau: 0.000001
-2022-01-23 13:52:13,210 P14331 INFO Early stopping at epoch=30
-2022-01-23 13:52:13,210 P14331 INFO --- 343/343 batches finished ---
-2022-01-23 13:52:13,263 P14331 INFO Train loss: 0.132949
-2022-01-23 13:52:13,263 P14331 INFO Training finished.
-2022-01-23 13:52:13,264 P14331 INFO Load best model: /home/FuxiCTR/benchmarks/Frappe/DCNv2_movielenslatest_x1/movielenslatest_x1_cd32d937/DCNv2_movielenslatest_x1_005_a53d8bd5.model
-2022-01-23 13:52:13,313 P14331 INFO ****** Validation evaluation ******
-2022-01-23 13:52:14,890 P14331 INFO [Metrics] AUC: 0.960746 - logloss: 0.257197
-2022-01-23 13:52:14,937 P14331 INFO ******** Test evaluation ********
-2022-01-23 13:52:14,938 P14331 INFO Loading data...
-2022-01-23 13:52:14,938 P14331 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_cd32d937/test.h5
-2022-01-23 13:52:14,943 P14331 INFO Test samples: total/200686, pos/66850, neg/133836, ratio/33.31%, blocks/1
-2022-01-23 13:52:14,943 P14331 INFO Loading test data done.
-2022-01-23 13:52:15,794 P14331 INFO [Metrics] AUC: 0.960600 - logloss: 0.257848
-
-```
+## CrossNetv2_movielenslatest_x1
+
+A hands-on guide to run the DCNv2 model on the MovielensLatest_x1 dataset.
+
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
+
+### Index
+[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
+
+### Environments
++ Hardware
+
+ ```python
+ CPU: Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.6GHz
+ GPU: Tesla P100 16G
+ RAM: 755G
+
+ ```
+
++ Software
+
+ ```python
+ CUDA: 11.4
+ python: 3.6.5
+ pytorch: 1.0.1.post2
+ pandas: 0.23.0
+ numpy: 1.18.1
+ scipy: 1.1.0
+ sklearn: 0.23.1
+ pyyaml: 5.1
+ h5py: 2.7.1
+ tqdm: 4.59.0
+ fuxictr: 1.1.0
+ ```
+
+### Dataset
+Dataset ID: [MovielensLatest_x1](https://github.com/reczoo/Datasets/tree/main/MovieLens/MovielensLatest_x1). Please refer to the dataset details to get data ready.
+
+### Code
+
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DCNv2](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DCNv2.py).
+
+Running steps:
+
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+
+ ```python
+ sys.path.append('YOUR_PATH_TO_FuxiCTR/')
+ ```
+
+2. Create a data directory and put the downloaded csv files in `../data/Movielens/MovielensLatest_x1`.
+
+3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [CrossNetv2_movielenslatest_x1_tuner_config_01](./CrossNetv2_movielenslatest_x1_tuner_config_01). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
+
+4. Run the following script to start.
+
+ ```bash
+ cd CrossNetv2_movielenslatest_x1
+ nohup python run_expid.py --config ./CrossNetv2_movielenslatest_x1_tuner_config_01 --expid DCNv2_movielenslatest_x1_005_a53d8bd5 --gpu 0 > run.log &
+ tail -f run.log
+ ```
+
+### Results
+
+| AUC | logloss |
+|:--------------------:|:--------------------:|
+| 0.960600 | 0.257848 |
+
+
+### Logs
+```python
+2022-01-23 13:32:02,119 P14331 INFO {
+ "batch_norm": "False",
+ "batch_size": "4096",
+ "data_format": "csv",
+ "data_root": "../data/Movielens/",
+ "dataset_id": "movielenslatest_x1_cd32d937",
+ "debug": "False",
+ "dnn_activations": "relu",
+ "embedding_dim": "10",
+ "embedding_regularizer": "0.001",
+ "epochs": "100",
+ "every_x_epochs": "1",
+ "feature_cols": "[{'active': True, 'dtype': 'float', 'name': ['user_id', 'item_id', 'tag_id'], 'type': 'categorical'}]",
+ "gpu": "0",
+ "label_col": "{'dtype': 'float', 'name': 'label'}",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "low_rank": "32",
+ "metrics": "['AUC', 'logloss']",
+ "min_categr_count": "1",
+ "model": "DCNv2",
+ "model_id": "DCNv2_movielenslatest_x1_005_a53d8bd5",
+ "model_root": "./Frappe/DCNv2_movielenslatest_x1/",
+ "model_structure": "crossnet_only",
+ "monitor": "AUC",
+ "monitor_mode": "max",
+ "net_dropout": "0",
+ "net_regularizer": "0",
+ "num_cross_layers": "8",
+ "num_experts": "4",
+ "num_workers": "3",
+ "optimizer": "adam",
+ "parallel_dnn_hidden_units": "[500, 500, 500]",
+ "partition_block_size": "-1",
+ "patience": "2",
+ "pickle_feature_encoder": "True",
+ "save_best_only": "True",
+ "seed": "2021",
+ "shuffle": "True",
+ "stacked_dnn_hidden_units": "[500, 500, 500]",
+ "task": "binary_classification",
+ "test_data": "../data/Movielens/MovielensLatest_x1/test.csv",
+ "train_data": "../data/Movielens/MovielensLatest_x1/train.csv",
+ "use_hdf5": "True",
+ "use_low_rank_mixture": "False",
+ "valid_data": "../data/Movielens/MovielensLatest_x1/valid.csv",
+ "verbose": "1",
+ "version": "pytorch"
+}
+2022-01-23 13:32:02,120 P14331 INFO Set up feature encoder...
+2022-01-23 13:32:02,120 P14331 INFO Load feature_map from json: ../data/Movielens/movielenslatest_x1_cd32d937/feature_map.json
+2022-01-23 13:32:02,121 P14331 INFO Loading data...
+2022-01-23 13:32:02,123 P14331 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_cd32d937/train.h5
+2022-01-23 13:32:02,152 P14331 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_cd32d937/valid.h5
+2022-01-23 13:32:02,160 P14331 INFO Train samples: total/1404801, pos/467878, neg/936923, ratio/33.31%, blocks/1
+2022-01-23 13:32:02,160 P14331 INFO Validation samples: total/401372, pos/134225, neg/267147, ratio/33.44%, blocks/1
+2022-01-23 13:32:02,160 P14331 INFO Loading train data done.
+2022-01-23 13:32:07,211 P14331 INFO Total number of parameters: 909861.
+2022-01-23 13:32:07,212 P14331 INFO Start training: 343 batches/epoch
+2022-01-23 13:32:07,212 P14331 INFO ************ Epoch=1 start ************
+2022-01-23 13:32:31,195 P14331 INFO [Metrics] AUC: 0.927115 - logloss: 0.307313
+2022-01-23 13:32:31,195 P14331 INFO Save best model: monitor(max): 0.927115
+2022-01-23 13:32:31,200 P14331 INFO --- 343/343 batches finished ---
+2022-01-23 13:32:31,253 P14331 INFO Train loss: 0.468740
+2022-01-23 13:32:31,253 P14331 INFO ************ Epoch=1 end ************
+2022-01-23 13:32:39,231 P14331 INFO [Metrics] AUC: 0.932153 - logloss: 0.299090
+2022-01-23 13:32:39,232 P14331 INFO Save best model: monitor(max): 0.932153
+2022-01-23 13:32:39,237 P14331 INFO --- 343/343 batches finished ---
+2022-01-23 13:32:39,294 P14331 INFO Train loss: 0.353250
+2022-01-23 13:32:39,294 P14331 INFO ************ Epoch=2 end ************
+2022-01-23 13:32:47,195 P14331 INFO [Metrics] AUC: 0.933659 - logloss: 0.296147
+2022-01-23 13:32:47,196 P14331 INFO Save best model: monitor(max): 0.933659
+2022-01-23 13:32:47,201 P14331 INFO --- 343/343 batches finished ---
+2022-01-23 13:32:47,257 P14331 INFO Train loss: 0.338897
+2022-01-23 13:32:47,257 P14331 INFO ************ Epoch=3 end ************
+2022-01-23 13:33:15,034 P14331 INFO [Metrics] AUC: 0.934972 - logloss: 0.293528
+2022-01-23 13:33:15,035 P14331 INFO Save best model: monitor(max): 0.934972
+2022-01-23 13:33:15,040 P14331 INFO --- 343/343 batches finished ---
+2022-01-23 13:33:15,093 P14331 INFO Train loss: 0.331366
+2022-01-23 13:33:15,093 P14331 INFO ************ Epoch=4 end ************
+2022-01-23 13:33:58,454 P14331 INFO [Metrics] AUC: 0.935891 - logloss: 0.291285
+2022-01-23 13:33:58,454 P14331 INFO Save best model: monitor(max): 0.935891
+2022-01-23 13:33:58,460 P14331 INFO --- 343/343 batches finished ---
+2022-01-23 13:33:58,519 P14331 INFO Train loss: 0.325854
+2022-01-23 13:33:58,519 P14331 INFO ************ Epoch=5 end ************
+2022-01-23 13:34:42,534 P14331 INFO [Metrics] AUC: 0.938383 - logloss: 0.285677
+2022-01-23 13:34:42,535 P14331 INFO Save best model: monitor(max): 0.938383
+2022-01-23 13:34:42,541 P14331 INFO --- 343/343 batches finished ---
+2022-01-23 13:34:42,593 P14331 INFO Train loss: 0.320053
+2022-01-23 13:34:42,593 P14331 INFO ************ Epoch=6 end ************
+2022-01-23 13:35:26,158 P14331 INFO [Metrics] AUC: 0.941837 - logloss: 0.277530
+2022-01-23 13:35:26,158 P14331 INFO Save best model: monitor(max): 0.941837
+2022-01-23 13:35:26,164 P14331 INFO --- 343/343 batches finished ---
+2022-01-23 13:35:26,212 P14331 INFO Train loss: 0.311736
+2022-01-23 13:35:26,212 P14331 INFO ************ Epoch=7 end ************
+2022-01-23 13:36:10,017 P14331 INFO [Metrics] AUC: 0.944402 - logloss: 0.271048
+2022-01-23 13:36:10,017 P14331 INFO Save best model: monitor(max): 0.944402
+2022-01-23 13:36:10,023 P14331 INFO --- 343/343 batches finished ---
+2022-01-23 13:36:10,073 P14331 INFO Train loss: 0.304064
+2022-01-23 13:36:10,073 P14331 INFO ************ Epoch=8 end ************
+2022-01-23 13:36:53,642 P14331 INFO [Metrics] AUC: 0.947380 - logloss: 0.264100
+2022-01-23 13:36:53,642 P14331 INFO Save best model: monitor(max): 0.947380
+2022-01-23 13:36:53,648 P14331 INFO --- 343/343 batches finished ---
+2022-01-23 13:36:53,695 P14331 INFO Train loss: 0.296497
+2022-01-23 13:36:53,696 P14331 INFO ************ Epoch=9 end ************
+2022-01-23 13:37:37,226 P14331 INFO [Metrics] AUC: 0.949104 - logloss: 0.260154
+2022-01-23 13:37:37,226 P14331 INFO Save best model: monitor(max): 0.949104
+2022-01-23 13:37:37,232 P14331 INFO --- 343/343 batches finished ---
+2022-01-23 13:37:37,279 P14331 INFO Train loss: 0.286990
+2022-01-23 13:37:37,279 P14331 INFO ************ Epoch=10 end ************
+2022-01-23 13:38:21,087 P14331 INFO [Metrics] AUC: 0.950721 - logloss: 0.256409
+2022-01-23 13:38:21,088 P14331 INFO Save best model: monitor(max): 0.950721
+2022-01-23 13:38:21,094 P14331 INFO --- 343/343 batches finished ---
+2022-01-23 13:38:21,143 P14331 INFO Train loss: 0.279196
+2022-01-23 13:38:21,143 P14331 INFO ************ Epoch=11 end ************
+2022-01-23 13:39:05,407 P14331 INFO [Metrics] AUC: 0.951915 - logloss: 0.253872
+2022-01-23 13:39:05,407 P14331 INFO Save best model: monitor(max): 0.951915
+2022-01-23 13:39:05,413 P14331 INFO --- 343/343 batches finished ---
+2022-01-23 13:39:05,492 P14331 INFO Train loss: 0.272639
+2022-01-23 13:39:05,493 P14331 INFO ************ Epoch=12 end ************
+2022-01-23 13:39:49,580 P14331 INFO [Metrics] AUC: 0.952646 - logloss: 0.252227
+2022-01-23 13:39:49,581 P14331 INFO Save best model: monitor(max): 0.952646
+2022-01-23 13:39:49,586 P14331 INFO --- 343/343 batches finished ---
+2022-01-23 13:39:49,638 P14331 INFO Train loss: 0.267295
+2022-01-23 13:39:49,638 P14331 INFO ************ Epoch=13 end ************
+2022-01-23 13:40:33,519 P14331 INFO [Metrics] AUC: 0.953356 - logloss: 0.250443
+2022-01-23 13:40:33,519 P14331 INFO Save best model: monitor(max): 0.953356
+2022-01-23 13:40:33,525 P14331 INFO --- 343/343 batches finished ---
+2022-01-23 13:40:33,571 P14331 INFO Train loss: 0.262843
+2022-01-23 13:40:33,571 P14331 INFO ************ Epoch=14 end ************
+2022-01-23 13:41:17,246 P14331 INFO [Metrics] AUC: 0.953862 - logloss: 0.249957
+2022-01-23 13:41:17,246 P14331 INFO Save best model: monitor(max): 0.953862
+2022-01-23 13:41:17,252 P14331 INFO --- 343/343 batches finished ---
+2022-01-23 13:41:17,299 P14331 INFO Train loss: 0.258983
+2022-01-23 13:41:17,299 P14331 INFO ************ Epoch=15 end ************
+2022-01-23 13:42:00,881 P14331 INFO [Metrics] AUC: 0.954835 - logloss: 0.247557
+2022-01-23 13:42:00,881 P14331 INFO Save best model: monitor(max): 0.954835
+2022-01-23 13:42:00,887 P14331 INFO --- 343/343 batches finished ---
+2022-01-23 13:42:00,934 P14331 INFO Train loss: 0.255221
+2022-01-23 13:42:00,934 P14331 INFO ************ Epoch=16 end ************
+2022-01-23 13:42:44,366 P14331 INFO [Metrics] AUC: 0.955381 - logloss: 0.246348
+2022-01-23 13:42:44,366 P14331 INFO Save best model: monitor(max): 0.955381
+2022-01-23 13:42:44,372 P14331 INFO --- 343/343 batches finished ---
+2022-01-23 13:42:44,418 P14331 INFO Train loss: 0.251695
+2022-01-23 13:42:44,418 P14331 INFO ************ Epoch=17 end ************
+2022-01-23 13:43:27,947 P14331 INFO [Metrics] AUC: 0.955865 - logloss: 0.245874
+2022-01-23 13:43:27,947 P14331 INFO Save best model: monitor(max): 0.955865
+2022-01-23 13:43:27,953 P14331 INFO --- 343/343 batches finished ---
+2022-01-23 13:43:27,999 P14331 INFO Train loss: 0.247962
+2022-01-23 13:43:27,999 P14331 INFO ************ Epoch=18 end ************
+2022-01-23 13:44:12,125 P14331 INFO [Metrics] AUC: 0.956488 - logloss: 0.245183
+2022-01-23 13:44:12,126 P14331 INFO Save best model: monitor(max): 0.956488
+2022-01-23 13:44:12,132 P14331 INFO --- 343/343 batches finished ---
+2022-01-23 13:44:12,182 P14331 INFO Train loss: 0.243939
+2022-01-23 13:44:12,182 P14331 INFO ************ Epoch=19 end ************
+2022-01-23 13:44:55,588 P14331 INFO [Metrics] AUC: 0.957065 - logloss: 0.243498
+2022-01-23 13:44:55,588 P14331 INFO Save best model: monitor(max): 0.957065
+2022-01-23 13:44:55,594 P14331 INFO --- 343/343 batches finished ---
+2022-01-23 13:44:55,642 P14331 INFO Train loss: 0.240151
+2022-01-23 13:44:55,642 P14331 INFO ************ Epoch=20 end ************
+2022-01-23 13:45:39,335 P14331 INFO [Metrics] AUC: 0.957455 - logloss: 0.244029
+2022-01-23 13:45:39,335 P14331 INFO Save best model: monitor(max): 0.957455
+2022-01-23 13:45:39,341 P14331 INFO --- 343/343 batches finished ---
+2022-01-23 13:45:39,390 P14331 INFO Train loss: 0.236745
+2022-01-23 13:45:39,390 P14331 INFO ************ Epoch=21 end ************
+2022-01-23 13:46:23,262 P14331 INFO [Metrics] AUC: 0.957577 - logloss: 0.244996
+2022-01-23 13:46:23,263 P14331 INFO Save best model: monitor(max): 0.957577
+2022-01-23 13:46:23,269 P14331 INFO --- 343/343 batches finished ---
+2022-01-23 13:46:23,318 P14331 INFO Train loss: 0.233452
+2022-01-23 13:46:23,318 P14331 INFO ************ Epoch=22 end ************
+2022-01-23 13:47:07,423 P14331 INFO [Metrics] AUC: 0.957621 - logloss: 0.245759
+2022-01-23 13:47:07,423 P14331 INFO Save best model: monitor(max): 0.957621
+2022-01-23 13:47:07,430 P14331 INFO --- 343/343 batches finished ---
+2022-01-23 13:47:07,476 P14331 INFO Train loss: 0.230427
+2022-01-23 13:47:07,478 P14331 INFO ************ Epoch=23 end ************
+2022-01-23 13:47:50,921 P14331 INFO [Metrics] AUC: 0.958001 - logloss: 0.246918
+2022-01-23 13:47:50,922 P14331 INFO Save best model: monitor(max): 0.958001
+2022-01-23 13:47:50,929 P14331 INFO --- 343/343 batches finished ---
+2022-01-23 13:47:50,979 P14331 INFO Train loss: 0.227437
+2022-01-23 13:47:50,979 P14331 INFO ************ Epoch=24 end ************
+2022-01-23 13:48:33,936 P14331 INFO [Metrics] AUC: 0.957814 - logloss: 0.248785
+2022-01-23 13:48:33,937 P14331 INFO Monitor(max) STOP: 0.957814 !
+2022-01-23 13:48:33,937 P14331 INFO Reduce learning rate on plateau: 0.000100
+2022-01-23 13:48:33,937 P14331 INFO --- 343/343 batches finished ---
+2022-01-23 13:48:33,984 P14331 INFO Train loss: 0.224838
+2022-01-23 13:48:33,985 P14331 INFO ************ Epoch=25 end ************
+2022-01-23 13:49:17,302 P14331 INFO [Metrics] AUC: 0.960105 - logloss: 0.247866
+2022-01-23 13:49:17,302 P14331 INFO Save best model: monitor(max): 0.960105
+2022-01-23 13:49:17,308 P14331 INFO --- 343/343 batches finished ---
+2022-01-23 13:49:17,368 P14331 INFO Train loss: 0.182522
+2022-01-23 13:49:17,368 P14331 INFO ************ Epoch=26 end ************
+2022-01-23 13:50:00,952 P14331 INFO [Metrics] AUC: 0.960734 - logloss: 0.251583
+2022-01-23 13:50:00,952 P14331 INFO Save best model: monitor(max): 0.960734
+2022-01-23 13:50:00,958 P14331 INFO --- 343/343 batches finished ---
+2022-01-23 13:50:01,040 P14331 INFO Train loss: 0.162325
+2022-01-23 13:50:01,040 P14331 INFO ************ Epoch=27 end ************
+2022-01-23 13:50:44,974 P14331 INFO [Metrics] AUC: 0.960746 - logloss: 0.257197
+2022-01-23 13:50:44,974 P14331 INFO Save best model: monitor(max): 0.960746
+2022-01-23 13:50:44,980 P14331 INFO --- 343/343 batches finished ---
+2022-01-23 13:50:45,026 P14331 INFO Train loss: 0.150766
+2022-01-23 13:50:45,026 P14331 INFO ************ Epoch=28 end ************
+2022-01-23 13:51:29,111 P14331 INFO [Metrics] AUC: 0.960468 - logloss: 0.263855
+2022-01-23 13:51:29,111 P14331 INFO Monitor(max) STOP: 0.960468 !
+2022-01-23 13:51:29,111 P14331 INFO Reduce learning rate on plateau: 0.000010
+2022-01-23 13:51:29,111 P14331 INFO --- 343/343 batches finished ---
+2022-01-23 13:51:29,163 P14331 INFO Train loss: 0.142814
+2022-01-23 13:51:29,163 P14331 INFO ************ Epoch=29 end ************
+2022-01-23 13:52:13,209 P14331 INFO [Metrics] AUC: 0.960383 - logloss: 0.265112
+2022-01-23 13:52:13,210 P14331 INFO Monitor(max) STOP: 0.960383 !
+2022-01-23 13:52:13,210 P14331 INFO Reduce learning rate on plateau: 0.000001
+2022-01-23 13:52:13,210 P14331 INFO Early stopping at epoch=30
+2022-01-23 13:52:13,210 P14331 INFO --- 343/343 batches finished ---
+2022-01-23 13:52:13,263 P14331 INFO Train loss: 0.132949
+2022-01-23 13:52:13,263 P14331 INFO Training finished.
+2022-01-23 13:52:13,264 P14331 INFO Load best model: /home/FuxiCTR/benchmarks/Frappe/DCNv2_movielenslatest_x1/movielenslatest_x1_cd32d937/DCNv2_movielenslatest_x1_005_a53d8bd5.model
+2022-01-23 13:52:13,313 P14331 INFO ****** Validation evaluation ******
+2022-01-23 13:52:14,890 P14331 INFO [Metrics] AUC: 0.960746 - logloss: 0.257197
+2022-01-23 13:52:14,937 P14331 INFO ******** Test evaluation ********
+2022-01-23 13:52:14,938 P14331 INFO Loading data...
+2022-01-23 13:52:14,938 P14331 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_cd32d937/test.h5
+2022-01-23 13:52:14,943 P14331 INFO Test samples: total/200686, pos/66850, neg/133836, ratio/33.31%, blocks/1
+2022-01-23 13:52:14,943 P14331 INFO Loading test data done.
+2022-01-23 13:52:15,794 P14331 INFO [Metrics] AUC: 0.960600 - logloss: 0.257848
+
+```
diff --git a/ranking/ctr/DCNv2/DCNv2_amazonelectronics_x1/README.md b/ranking/ctr/DCNv2/DCNv2_amazonelectronics_x1/README.md
index 389f2f88..08e7761f 100644
--- a/ranking/ctr/DCNv2/DCNv2_amazonelectronics_x1/README.md
+++ b/ranking/ctr/DCNv2/DCNv2_amazonelectronics_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DCNv2 model on the AmazonElectronics_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [AmazonElectronics_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/Amazon#AmazonElectronics_x1) to get data ready.
+Please refer to [AmazonElectronics_x1](https://github.com/reczoo/Datasets/tree/main/Amazon/AmazonElectronics_x1) to get the dataset details.
### Code
-We use the [DCNv2](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/DCNv2) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [DCNv2](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/DCNv2) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/DCNv2/DCNv2_avazu_x1/README.md b/ranking/ctr/DCNv2/DCNv2_avazu_x1/README.md
index ad6ba19f..6205497f 100644
--- a/ranking/ctr/DCNv2/DCNv2_avazu_x1/README.md
+++ b/ranking/ctr/DCNv2/DCNv2_avazu_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DCNv2 model on the Avazu_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x1](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DCNv2](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DCNv2.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DCNv2](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DCNv2.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DCNv2/DCNv2_avazu_x4_001/DCNv2_avazu_x4_011_19794dc6.log b/ranking/ctr/DCNv2/DCNv2_avazu_x4_001/DCNv2_avazu_x4_011_19794dc6.log
new file mode 100644
index 00000000..ab9dd4b9
--- /dev/null
+++ b/ranking/ctr/DCNv2/DCNv2_avazu_x4_001/DCNv2_avazu_x4_011_19794dc6.log
@@ -0,0 +1,86 @@
+2022-05-17 08:01:02,992 P30071 INFO {
+ "batch_norm": "False",
+ "batch_size": "10000",
+ "data_format": "csv",
+ "data_root": "../data/Avazu/",
+ "dataset_id": "avazu_x4_3bbbc4c9",
+ "debug": "False",
+ "dnn_activations": "relu",
+ "embedding_dim": "16",
+ "embedding_regularizer": "1e-09",
+ "epochs": "100",
+ "every_x_epochs": "1",
+ "feature_cols": "[{'active': False, 'dtype': 'str', 'name': 'id', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': 'hour', 'preprocess': 'convert_hour', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': ['C1', 'banner_pos', 'site_id', 'site_domain', 'site_category', 'app_id', 'app_domain', 'app_category', 'device_id', 'device_ip', 'device_model', 'device_type', 'device_conn_type', 'C14', 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21'], 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': 'weekday', 'preprocess': 'convert_weekday', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': 'weekend', 'preprocess': 'convert_weekend', 'type': 'categorical'}]",
+ "gpu": "2",
+ "label_col": "{'dtype': 'float', 'name': 'click'}",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "low_rank": "32",
+ "metrics": "['AUC', 'logloss']",
+ "min_categr_count": "2",
+ "model": "DCNv2",
+ "model_id": "DCNv2_avazu_x4_011_19794dc6",
+ "model_root": "./Avazu/DCNv2_avazu_x4_001/",
+ "model_structure": "parallel",
+ "monitor": "{'AUC': 1, 'logloss': -1}",
+ "monitor_mode": "max",
+ "net_dropout": "0",
+ "net_regularizer": "0",
+ "num_cross_layers": "4",
+ "num_experts": "4",
+ "num_workers": "3",
+ "optimizer": "adam",
+ "parallel_dnn_hidden_units": "[2000, 2000, 2000, 2000]",
+ "patience": "2",
+ "pickle_feature_encoder": "True",
+ "save_best_only": "True",
+ "seed": "2019",
+ "shuffle": "True",
+ "stacked_dnn_hidden_units": "[500, 500, 500]",
+ "task": "binary_classification",
+ "test_data": "../data/Avazu/Avazu_x4/test.csv",
+ "train_data": "../data/Avazu/Avazu_x4/train.csv",
+ "use_hdf5": "True",
+ "use_low_rank_mixture": "False",
+ "valid_data": "../data/Avazu/Avazu_x4/valid.csv",
+ "verbose": "0",
+ "version": "pytorch"
+}
+2022-05-17 08:01:02,992 P30071 INFO Set up feature encoder...
+2022-05-17 08:01:02,992 P30071 INFO Load feature_map from json: ../data/Avazu/avazu_x4_3bbbc4c9/feature_map.json
+2022-05-17 08:01:02,993 P30071 INFO Loading data...
+2022-05-17 08:01:02,993 P30071 INFO Loading data from h5: ../data/Avazu/avazu_x4_3bbbc4c9/train.h5
+2022-05-17 08:01:05,999 P30071 INFO Loading data from h5: ../data/Avazu/avazu_x4_3bbbc4c9/valid.h5
+2022-05-17 08:01:06,425 P30071 INFO Train samples: total/32343172, pos/5492052, neg/26851120, ratio/16.98%, blocks/1
+2022-05-17 08:01:06,425 P30071 INFO Validation samples: total/4042897, pos/686507, neg/3356390, ratio/16.98%, blocks/1
+2022-05-17 08:01:06,425 P30071 INFO Loading train data done.
+2022-05-17 08:01:13,233 P30071 INFO Total number of parameters: 73385345.
+2022-05-17 08:01:13,234 P30071 INFO Start training: 3235 batches/epoch
+2022-05-17 08:01:13,234 P30071 INFO ************ Epoch=1 start ************
+2022-05-17 08:19:08,450 P30071 INFO [Metrics] AUC: 0.792978 - logloss: 0.371967
+2022-05-17 08:19:08,453 P30071 INFO Save best model: monitor(max): 0.421011
+2022-05-17 08:19:08,971 P30071 INFO --- 3235/3235 batches finished ---
+2022-05-17 08:19:09,016 P30071 INFO Train loss: 0.380317
+2022-05-17 08:19:09,016 P30071 INFO ************ Epoch=1 end ************
+2022-05-17 08:37:02,790 P30071 INFO [Metrics] AUC: 0.788575 - logloss: 0.383554
+2022-05-17 08:37:02,792 P30071 INFO Monitor(max) STOP: 0.405021 !
+2022-05-17 08:37:02,792 P30071 INFO Reduce learning rate on plateau: 0.000100
+2022-05-17 08:37:02,792 P30071 INFO --- 3235/3235 batches finished ---
+2022-05-17 08:37:02,829 P30071 INFO Train loss: 0.332264
+2022-05-17 08:37:02,829 P30071 INFO ************ Epoch=2 end ************
+2022-05-17 08:54:55,819 P30071 INFO [Metrics] AUC: 0.776121 - logloss: 0.427745
+2022-05-17 08:54:55,822 P30071 INFO Monitor(max) STOP: 0.348377 !
+2022-05-17 08:54:55,822 P30071 INFO Reduce learning rate on plateau: 0.000010
+2022-05-17 08:54:55,822 P30071 INFO Early stopping at epoch=3
+2022-05-17 08:54:55,822 P30071 INFO --- 3235/3235 batches finished ---
+2022-05-17 08:54:55,858 P30071 INFO Train loss: 0.291580
+2022-05-17 08:54:55,858 P30071 INFO Training finished.
+2022-05-17 08:54:55,859 P30071 INFO Load best model: /cache/FuxiCTR/benchmarks/Avazu/DCNv2_avazu_x4_001/avazu_x4_3bbbc4c9/DCNv2_avazu_x4_011_19794dc6.model
+2022-05-17 08:55:02,455 P30071 INFO ****** Validation evaluation ******
+2022-05-17 08:55:24,972 P30071 INFO [Metrics] AUC: 0.792978 - logloss: 0.371967
+2022-05-17 08:55:25,040 P30071 INFO ******** Test evaluation ********
+2022-05-17 08:55:25,040 P30071 INFO Loading data...
+2022-05-17 08:55:25,041 P30071 INFO Loading data from h5: ../data/Avazu/avazu_x4_3bbbc4c9/test.h5
+2022-05-17 08:55:25,405 P30071 INFO Test samples: total/4042898, pos/686507, neg/3356391, ratio/16.98%, blocks/1
+2022-05-17 08:55:25,405 P30071 INFO Loading test data done.
+2022-05-17 08:55:47,592 P30071 INFO [Metrics] AUC: 0.793146 - logloss: 0.371865
diff --git a/ranking/ctr/DCNv2/DCNv2_avazu_x4_001/DCNv2_avazu_x4_tuner_config_02.csv b/ranking/ctr/DCNv2/DCNv2_avazu_x4_001/DCNv2_avazu_x4_tuner_config_02.csv
new file mode 100644
index 00000000..164d809d
--- /dev/null
+++ b/ranking/ctr/DCNv2/DCNv2_avazu_x4_001/DCNv2_avazu_x4_tuner_config_02.csv
@@ -0,0 +1,12 @@
+ 20220517-085547,[command] python run_expid.py --version pytorch --config Avazu/DCNv2_avazu_x4_001/DCNv2_avazu_x4_tuner_config_02 --expid DCNv2_avazu_x4_011_19794dc6 --gpu 2,[exp_id] DCNv2_avazu_x4_011_19794dc6,[dataset_id] avazu_x4_3bbbc4c9,[train] N.A.,[val] AUC: 0.792978 - logloss: 0.371967,[test] AUC: 0.793146 - logloss: 0.371865
+ 20220517-082457,[command] python run_expid.py --version pytorch --config Avazu/DCNv2_avazu_x4_001/DCNv2_avazu_x4_tuner_config_02 --expid DCNv2_avazu_x4_005_0f059aed --gpu 4,[exp_id] DCNv2_avazu_x4_005_0f059aed,[dataset_id] avazu_x4_3bbbc4c9,[train] N.A.,[val] AUC: 0.792974 - logloss: 0.371977,[test] AUC: 0.793132 - logloss: 0.371870
+ 20220517-090523,[command] python run_expid.py --version pytorch --config Avazu/DCNv2_avazu_x4_001/DCNv2_avazu_x4_tuner_config_02 --expid DCNv2_avazu_x4_012_b77e203c --gpu 3,[exp_id] DCNv2_avazu_x4_012_b77e203c,[dataset_id] avazu_x4_3bbbc4c9,[train] N.A.,[val] AUC: 0.792736 - logloss: 0.372289,[test] AUC: 0.793061 - logloss: 0.372097
+ 20220517-090552,[command] python run_expid.py --version pytorch --config Avazu/DCNv2_avazu_x4_001/DCNv2_avazu_x4_tuner_config_02 --expid DCNv2_avazu_x4_010_ef8256f1 --gpu 1,[exp_id] DCNv2_avazu_x4_010_ef8256f1,[dataset_id] avazu_x4_3bbbc4c9,[train] N.A.,[val] AUC: 0.792943 - logloss: 0.372103,[test] AUC: 0.793028 - logloss: 0.372038
+ 20220517-090523,[command] python run_expid.py --version pytorch --config Avazu/DCNv2_avazu_x4_001/DCNv2_avazu_x4_tuner_config_02 --expid DCNv2_avazu_x4_004_1a2da6a6 --gpu 3,[exp_id] DCNv2_avazu_x4_004_1a2da6a6,[dataset_id] avazu_x4_3bbbc4c9,[train] N.A.,[val] AUC: 0.792920 - logloss: 0.372162,[test] AUC: 0.793015 - logloss: 0.372100
+ 20220517-082854,[command] python run_expid.py --version pytorch --config Avazu/DCNv2_avazu_x4_001/DCNv2_avazu_x4_tuner_config_02 --expid DCNv2_avazu_x4_006_42ea03cb --gpu 5,[exp_id] DCNv2_avazu_x4_006_42ea03cb,[dataset_id] avazu_x4_3bbbc4c9,[train] N.A.,[val] AUC: 0.792636 - logloss: 0.372354,[test] AUC: 0.793005 - logloss: 0.372145
+ 20220517-083024,[command] python run_expid.py --version pytorch --config Avazu/DCNv2_avazu_x4_001/DCNv2_avazu_x4_tuner_config_02 --expid DCNv2_avazu_x4_008_5a1073ef --gpu 7,[exp_id] DCNv2_avazu_x4_008_5a1073ef,[dataset_id] avazu_x4_3bbbc4c9,[train] N.A.,[val] AUC: 0.792870 - logloss: 0.372163,[test] AUC: 0.792935 - logloss: 0.372117
+ 20220517-090550,[command] python run_expid.py --version pytorch --config Avazu/DCNv2_avazu_x4_001/DCNv2_avazu_x4_tuner_config_02 --expid DCNv2_avazu_x4_002_1a82ae64 --gpu 1,[exp_id] DCNv2_avazu_x4_002_1a82ae64,[dataset_id] avazu_x4_3bbbc4c9,[train] N.A.,[val] AUC: 0.792833 - logloss: 0.372307,[test] AUC: 0.792884 - logloss: 0.372263
+ 20220517-085654,[command] python run_expid.py --version pytorch --config Avazu/DCNv2_avazu_x4_001/DCNv2_avazu_x4_tuner_config_02 --expid DCNv2_avazu_x4_001_5e30b8d4 --gpu 0,[exp_id] DCNv2_avazu_x4_001_5e30b8d4,[dataset_id] avazu_x4_3bbbc4c9,[train] N.A.,[val] AUC: 0.792771 - logloss: 0.372056,[test] AUC: 0.792880 - logloss: 0.371993
+ 20220517-082622,[command] python run_expid.py --version pytorch --config Avazu/DCNv2_avazu_x4_001/DCNv2_avazu_x4_tuner_config_02 --expid DCNv2_avazu_x4_007_6eb830fd --gpu 6,[exp_id] DCNv2_avazu_x4_007_6eb830fd,[dataset_id] avazu_x4_3bbbc4c9,[train] N.A.,[val] AUC: 0.792723 - logloss: 0.372107,[test] AUC: 0.792782 - logloss: 0.372064
+ 20220517-085653,[command] python run_expid.py --version pytorch --config Avazu/DCNv2_avazu_x4_001/DCNv2_avazu_x4_tuner_config_02 --expid DCNv2_avazu_x4_009_5084a41d --gpu 0,[exp_id] DCNv2_avazu_x4_009_5084a41d,[dataset_id] avazu_x4_3bbbc4c9,[train] N.A.,[val] AUC: 0.792479 - logloss: 0.372199,[test] AUC: 0.792761 - logloss: 0.372073
+ 20220517-085542,[command] python run_expid.py --version pytorch --config Avazu/DCNv2_avazu_x4_001/DCNv2_avazu_x4_tuner_config_02 --expid DCNv2_avazu_x4_003_09b69ca2 --gpu 2,[exp_id] DCNv2_avazu_x4_003_09b69ca2,[dataset_id] avazu_x4_3bbbc4c9,[train] N.A.,[val] AUC: 0.792428 - logloss: 0.372241,[test] AUC: 0.792668 - logloss: 0.372139
diff --git a/ranking/ctr/DCNv2/DCNv2_avazu_x4_001/DCNv2_avazu_x4_tuner_config_02.yaml b/ranking/ctr/DCNv2/DCNv2_avazu_x4_001/DCNv2_avazu_x4_tuner_config_02.yaml
new file mode 100644
index 00000000..2cf1db06
--- /dev/null
+++ b/ranking/ctr/DCNv2/DCNv2_avazu_x4_001/DCNv2_avazu_x4_tuner_config_02.yaml
@@ -0,0 +1,39 @@
+base_config: ../config/
+base_expid: DCNv2_base
+dataset_id: avazu_x4
+
+dataset_config:
+ avazu_x4:
+ data_root: ../data/Avazu/
+ data_format: csv
+ train_data: ../data/Avazu/Avazu_x4/train.csv
+ valid_data: ../data/Avazu/Avazu_x4/valid.csv
+ test_data: ../data/Avazu/Avazu_x4/test.csv
+ min_categr_count: 2
+ feature_cols:
+ - {name: id, active: False, dtype: str, type: categorical}
+ - {name: hour, active: True, dtype: str, type: categorical, preprocess: convert_hour}
+ - {name: [C1,banner_pos,site_id,site_domain,site_category,app_id,app_domain,app_category,device_id,
+ device_ip,device_model,device_type,device_conn_type,C14,C15,C16,C17,C18,C19,C20,C21],
+ active: True, dtype: str, type: categorical}
+ - {name: weekday, active: True, dtype: str, type: categorical, preprocess: convert_weekday}
+ - {name: weekend, active: True, dtype: str, type: categorical, preprocess: convert_weekend}
+ label_col: {name: click, dtype: float}
+
+
+tuner_space:
+ model_root: './Avazu/DCNv2_avazu_x4_001/'
+ embedding_dim: 16
+ embedding_regularizer: [0, 1.e-9]
+ batch_norm: [False, True]
+ model_structure: parallel
+ parallel_dnn_hidden_units: [[2000, 2000, 2000, 2000]]
+ num_cross_layers: [6, 5, 4]
+ net_dropout: 0
+ learning_rate: 1.e-3
+ batch_size: 10000
+ seed: 2019
+ monitor: {'AUC': 1, 'logloss': -1}
+ metrics: [[AUC, logloss]]
+ verbose: 0
+
diff --git a/ranking/ctr/DCNv2/DCNv2_avazu_x4_001/DCNv2_avazu_x4_tuner_config_02/dataset_config.yaml b/ranking/ctr/DCNv2/DCNv2_avazu_x4_001/DCNv2_avazu_x4_tuner_config_02/dataset_config.yaml
new file mode 100644
index 00000000..7426d025
--- /dev/null
+++ b/ranking/ctr/DCNv2/DCNv2_avazu_x4_001/DCNv2_avazu_x4_tuner_config_02/dataset_config.yaml
@@ -0,0 +1,19 @@
+avazu_x4_3bbbc4c9:
+ data_format: csv
+ data_root: ../data/Avazu/
+ feature_cols:
+ - {active: false, dtype: str, name: id, type: categorical}
+ - {active: true, dtype: str, name: hour, preprocess: convert_hour, type: categorical}
+ - active: true
+ dtype: str
+ name: [C1, banner_pos, site_id, site_domain, site_category, app_id, app_domain,
+ app_category, device_id, device_ip, device_model, device_type, device_conn_type,
+ C14, C15, C16, C17, C18, C19, C20, C21]
+ type: categorical
+ - {active: true, dtype: str, name: weekday, preprocess: convert_weekday, type: categorical}
+ - {active: true, dtype: str, name: weekend, preprocess: convert_weekend, type: categorical}
+ label_col: {dtype: float, name: click}
+ min_categr_count: 2
+ test_data: ../data/Avazu/Avazu_x4/test.csv
+ train_data: ../data/Avazu/Avazu_x4/train.csv
+ valid_data: ../data/Avazu/Avazu_x4/valid.csv
diff --git a/ranking/ctr/DCNv2/DCNv2_avazu_x4_001/DCNv2_avazu_x4_tuner_config_02/model_config.yaml b/ranking/ctr/DCNv2/DCNv2_avazu_x4_001/DCNv2_avazu_x4_tuner_config_02/model_config.yaml
new file mode 100644
index 00000000..d758462f
--- /dev/null
+++ b/ranking/ctr/DCNv2/DCNv2_avazu_x4_001/DCNv2_avazu_x4_tuner_config_02/model_config.yaml
@@ -0,0 +1,444 @@
+DCNv2_avazu_x4_001_5e30b8d4:
+ batch_norm: false
+ batch_size: 10000
+ dataset_id: avazu_x4_3bbbc4c9
+ debug: false
+ dnn_activations: relu
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ every_x_epochs: 1
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ low_rank: 32
+ metrics: [AUC, logloss]
+ model: DCNv2
+ model_id: DCNv2_base
+ model_root: ./Avazu/DCNv2_avazu_x4_001/
+ model_structure: parallel
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_regularizer: 0
+ num_cross_layers: 6
+ num_experts: 4
+ num_workers: 3
+ optimizer: adam
+ parallel_dnn_hidden_units: [2000, 2000, 2000, 2000]
+ patience: 2
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ stacked_dnn_hidden_units: [500, 500, 500]
+ task: binary_classification
+ use_hdf5: true
+ use_low_rank_mixture: false
+ verbose: 0
+DCNv2_avazu_x4_002_1a82ae64:
+ batch_norm: true
+ batch_size: 10000
+ dataset_id: avazu_x4_3bbbc4c9
+ debug: false
+ dnn_activations: relu
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ every_x_epochs: 1
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ low_rank: 32
+ metrics: [AUC, logloss]
+ model: DCNv2
+ model_id: DCNv2_base
+ model_root: ./Avazu/DCNv2_avazu_x4_001/
+ model_structure: parallel
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_regularizer: 0
+ num_cross_layers: 6
+ num_experts: 4
+ num_workers: 3
+ optimizer: adam
+ parallel_dnn_hidden_units: [2000, 2000, 2000, 2000]
+ patience: 2
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ stacked_dnn_hidden_units: [500, 500, 500]
+ task: binary_classification
+ use_hdf5: true
+ use_low_rank_mixture: false
+ verbose: 0
+DCNv2_avazu_x4_003_09b69ca2:
+ batch_norm: false
+ batch_size: 10000
+ dataset_id: avazu_x4_3bbbc4c9
+ debug: false
+ dnn_activations: relu
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ every_x_epochs: 1
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ low_rank: 32
+ metrics: [AUC, logloss]
+ model: DCNv2
+ model_id: DCNv2_base
+ model_root: ./Avazu/DCNv2_avazu_x4_001/
+ model_structure: parallel
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_regularizer: 0
+ num_cross_layers: 5
+ num_experts: 4
+ num_workers: 3
+ optimizer: adam
+ parallel_dnn_hidden_units: [2000, 2000, 2000, 2000]
+ patience: 2
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ stacked_dnn_hidden_units: [500, 500, 500]
+ task: binary_classification
+ use_hdf5: true
+ use_low_rank_mixture: false
+ verbose: 0
+DCNv2_avazu_x4_004_1a2da6a6:
+ batch_norm: true
+ batch_size: 10000
+ dataset_id: avazu_x4_3bbbc4c9
+ debug: false
+ dnn_activations: relu
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ every_x_epochs: 1
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ low_rank: 32
+ metrics: [AUC, logloss]
+ model: DCNv2
+ model_id: DCNv2_base
+ model_root: ./Avazu/DCNv2_avazu_x4_001/
+ model_structure: parallel
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_regularizer: 0
+ num_cross_layers: 5
+ num_experts: 4
+ num_workers: 3
+ optimizer: adam
+ parallel_dnn_hidden_units: [2000, 2000, 2000, 2000]
+ patience: 2
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ stacked_dnn_hidden_units: [500, 500, 500]
+ task: binary_classification
+ use_hdf5: true
+ use_low_rank_mixture: false
+ verbose: 0
+DCNv2_avazu_x4_005_0f059aed:
+ batch_norm: false
+ batch_size: 10000
+ dataset_id: avazu_x4_3bbbc4c9
+ debug: false
+ dnn_activations: relu
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ every_x_epochs: 1
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ low_rank: 32
+ metrics: [AUC, logloss]
+ model: DCNv2
+ model_id: DCNv2_base
+ model_root: ./Avazu/DCNv2_avazu_x4_001/
+ model_structure: parallel
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_regularizer: 0
+ num_cross_layers: 4
+ num_experts: 4
+ num_workers: 3
+ optimizer: adam
+ parallel_dnn_hidden_units: [2000, 2000, 2000, 2000]
+ patience: 2
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ stacked_dnn_hidden_units: [500, 500, 500]
+ task: binary_classification
+ use_hdf5: true
+ use_low_rank_mixture: false
+ verbose: 0
+DCNv2_avazu_x4_006_42ea03cb:
+ batch_norm: true
+ batch_size: 10000
+ dataset_id: avazu_x4_3bbbc4c9
+ debug: false
+ dnn_activations: relu
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ every_x_epochs: 1
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ low_rank: 32
+ metrics: [AUC, logloss]
+ model: DCNv2
+ model_id: DCNv2_base
+ model_root: ./Avazu/DCNv2_avazu_x4_001/
+ model_structure: parallel
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_regularizer: 0
+ num_cross_layers: 4
+ num_experts: 4
+ num_workers: 3
+ optimizer: adam
+ parallel_dnn_hidden_units: [2000, 2000, 2000, 2000]
+ patience: 2
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ stacked_dnn_hidden_units: [500, 500, 500]
+ task: binary_classification
+ use_hdf5: true
+ use_low_rank_mixture: false
+ verbose: 0
+DCNv2_avazu_x4_007_6eb830fd:
+ batch_norm: false
+ batch_size: 10000
+ dataset_id: avazu_x4_3bbbc4c9
+ debug: false
+ dnn_activations: relu
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ every_x_epochs: 1
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ low_rank: 32
+ metrics: [AUC, logloss]
+ model: DCNv2
+ model_id: DCNv2_base
+ model_root: ./Avazu/DCNv2_avazu_x4_001/
+ model_structure: parallel
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_regularizer: 0
+ num_cross_layers: 6
+ num_experts: 4
+ num_workers: 3
+ optimizer: adam
+ parallel_dnn_hidden_units: [2000, 2000, 2000, 2000]
+ patience: 2
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ stacked_dnn_hidden_units: [500, 500, 500]
+ task: binary_classification
+ use_hdf5: true
+ use_low_rank_mixture: false
+ verbose: 0
+DCNv2_avazu_x4_008_5a1073ef:
+ batch_norm: true
+ batch_size: 10000
+ dataset_id: avazu_x4_3bbbc4c9
+ debug: false
+ dnn_activations: relu
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ every_x_epochs: 1
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ low_rank: 32
+ metrics: [AUC, logloss]
+ model: DCNv2
+ model_id: DCNv2_base
+ model_root: ./Avazu/DCNv2_avazu_x4_001/
+ model_structure: parallel
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_regularizer: 0
+ num_cross_layers: 6
+ num_experts: 4
+ num_workers: 3
+ optimizer: adam
+ parallel_dnn_hidden_units: [2000, 2000, 2000, 2000]
+ patience: 2
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ stacked_dnn_hidden_units: [500, 500, 500]
+ task: binary_classification
+ use_hdf5: true
+ use_low_rank_mixture: false
+ verbose: 0
+DCNv2_avazu_x4_009_5084a41d:
+ batch_norm: false
+ batch_size: 10000
+ dataset_id: avazu_x4_3bbbc4c9
+ debug: false
+ dnn_activations: relu
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ every_x_epochs: 1
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ low_rank: 32
+ metrics: [AUC, logloss]
+ model: DCNv2
+ model_id: DCNv2_base
+ model_root: ./Avazu/DCNv2_avazu_x4_001/
+ model_structure: parallel
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_regularizer: 0
+ num_cross_layers: 5
+ num_experts: 4
+ num_workers: 3
+ optimizer: adam
+ parallel_dnn_hidden_units: [2000, 2000, 2000, 2000]
+ patience: 2
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ stacked_dnn_hidden_units: [500, 500, 500]
+ task: binary_classification
+ use_hdf5: true
+ use_low_rank_mixture: false
+ verbose: 0
+DCNv2_avazu_x4_010_ef8256f1:
+ batch_norm: true
+ batch_size: 10000
+ dataset_id: avazu_x4_3bbbc4c9
+ debug: false
+ dnn_activations: relu
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ every_x_epochs: 1
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ low_rank: 32
+ metrics: [AUC, logloss]
+ model: DCNv2
+ model_id: DCNv2_base
+ model_root: ./Avazu/DCNv2_avazu_x4_001/
+ model_structure: parallel
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_regularizer: 0
+ num_cross_layers: 5
+ num_experts: 4
+ num_workers: 3
+ optimizer: adam
+ parallel_dnn_hidden_units: [2000, 2000, 2000, 2000]
+ patience: 2
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ stacked_dnn_hidden_units: [500, 500, 500]
+ task: binary_classification
+ use_hdf5: true
+ use_low_rank_mixture: false
+ verbose: 0
+DCNv2_avazu_x4_011_19794dc6:
+ batch_norm: false
+ batch_size: 10000
+ dataset_id: avazu_x4_3bbbc4c9
+ debug: false
+ dnn_activations: relu
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ every_x_epochs: 1
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ low_rank: 32
+ metrics: [AUC, logloss]
+ model: DCNv2
+ model_id: DCNv2_base
+ model_root: ./Avazu/DCNv2_avazu_x4_001/
+ model_structure: parallel
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_regularizer: 0
+ num_cross_layers: 4
+ num_experts: 4
+ num_workers: 3
+ optimizer: adam
+ parallel_dnn_hidden_units: [2000, 2000, 2000, 2000]
+ patience: 2
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ stacked_dnn_hidden_units: [500, 500, 500]
+ task: binary_classification
+ use_hdf5: true
+ use_low_rank_mixture: false
+ verbose: 0
+DCNv2_avazu_x4_012_b77e203c:
+ batch_norm: true
+ batch_size: 10000
+ dataset_id: avazu_x4_3bbbc4c9
+ debug: false
+ dnn_activations: relu
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ every_x_epochs: 1
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ low_rank: 32
+ metrics: [AUC, logloss]
+ model: DCNv2
+ model_id: DCNv2_base
+ model_root: ./Avazu/DCNv2_avazu_x4_001/
+ model_structure: parallel
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_regularizer: 0
+ num_cross_layers: 4
+ num_experts: 4
+ num_workers: 3
+ optimizer: adam
+ parallel_dnn_hidden_units: [2000, 2000, 2000, 2000]
+ patience: 2
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ stacked_dnn_hidden_units: [500, 500, 500]
+ task: binary_classification
+ use_hdf5: true
+ use_low_rank_mixture: false
+ verbose: 0
diff --git a/ranking/ctr/DCNv2/DCNv2_avazu_x4_001/README.md b/ranking/ctr/DCNv2/DCNv2_avazu_x4_001/README.md
new file mode 100644
index 00000000..4ce19f1e
--- /dev/null
+++ b/ranking/ctr/DCNv2/DCNv2_avazu_x4_001/README.md
@@ -0,0 +1,161 @@
+## DCNv2_avazu_x4_001
+
+A hands-on guide to run the DCNv2 model on the Avazu_x4 dataset.
+
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
+
+
+| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
+|:-----------------------------:|:-----------:|:--------:|:--------:|-------|
+### Environments
++ Hardware
+
+ ```python
+ CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
+ GPU: Tesla V100 32G
+ RAM: 755G
+
+ ```
+
++ Software
+
+ ```python
+ CUDA: 10.2
+ python: 3.6.4
+ pytorch: 1.0.0
+ pandas: 0.22.0
+ numpy: 1.19.2
+ scipy: 1.5.4
+ sklearn: 0.22.1
+ pyyaml: 5.4.1
+ h5py: 2.8.0
+ tqdm: 4.60.0
+ fuxictr: 1.1.1
+
+ ```
+
+### Dataset
+Please refer to [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4) to get the dataset details.
+
+### Code
+
+We use the [DCNv2](https://github.com/reczoo/FuxiCTR/tree/v1.1.1/model_zoo/DCNv2) model code from [FuxiCTR-v1.1.1](https://github.com/reczoo/FuxiCTR/tree/v1.1.1) for this experiment.
+
+Running steps:
+
+1. Download [FuxiCTR-v1.1.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.1.zip) and install all the dependencies listed in the [environments](#environments).
+
+ ```bash
+ pip uninstall fuxictr
+ pip install fuxictr==1.1.1
+ ```
+
+2. Create a data directory and put the downloaded data files in `../data/Avazu/Avazu_x4`.
+
+3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [DCNv2_avazu_x4_tuner_config_02](./DCNv2_avazu_x4_tuner_config_02). Please make sure that the data paths in `dataset_config.yaml` are correctly set.
+
+4. Run the following script to start training and evaluation.
+
+ ```bash
+ cd FuxiCTR/model_zoo/DCNv2
+ nohup python run_expid.py --config YOUR_PATH/DCNv2/DCNv2_avazu_x4_tuner_config_02 --expid DCNv2_avazu_x4_011_19794dc6 --gpu 0 > run.log &
+ tail -f run.log
+ ```
+
+### Results
+
+| AUC | logloss |
+|:--------------------:|:--------------------:|
+| 0.793146 | 0.371865 |
+
+
+### Logs
+```python
+2022-05-17 08:01:02,992 P30071 INFO {
+ "batch_norm": "False",
+ "batch_size": "10000",
+ "data_format": "csv",
+ "data_root": "../data/Avazu/",
+ "dataset_id": "avazu_x4_3bbbc4c9",
+ "debug": "False",
+ "dnn_activations": "relu",
+ "embedding_dim": "16",
+ "embedding_regularizer": "1e-09",
+ "epochs": "100",
+ "every_x_epochs": "1",
+ "feature_cols": "[{'active': False, 'dtype': 'str', 'name': 'id', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': 'hour', 'preprocess': 'convert_hour', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': ['C1', 'banner_pos', 'site_id', 'site_domain', 'site_category', 'app_id', 'app_domain', 'app_category', 'device_id', 'device_ip', 'device_model', 'device_type', 'device_conn_type', 'C14', 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21'], 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': 'weekday', 'preprocess': 'convert_weekday', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': 'weekend', 'preprocess': 'convert_weekend', 'type': 'categorical'}]",
+ "gpu": "2",
+ "label_col": "{'dtype': 'float', 'name': 'click'}",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "low_rank": "32",
+ "metrics": "['AUC', 'logloss']",
+ "min_categr_count": "2",
+ "model": "DCNv2",
+ "model_id": "DCNv2_avazu_x4_011_19794dc6",
+ "model_root": "./Avazu/DCNv2_avazu_x4_001/",
+ "model_structure": "parallel",
+ "monitor": "{'AUC': 1, 'logloss': -1}",
+ "monitor_mode": "max",
+ "net_dropout": "0",
+ "net_regularizer": "0",
+ "num_cross_layers": "4",
+ "num_experts": "4",
+ "num_workers": "3",
+ "optimizer": "adam",
+ "parallel_dnn_hidden_units": "[2000, 2000, 2000, 2000]",
+ "patience": "2",
+ "pickle_feature_encoder": "True",
+ "save_best_only": "True",
+ "seed": "2019",
+ "shuffle": "True",
+ "stacked_dnn_hidden_units": "[500, 500, 500]",
+ "task": "binary_classification",
+ "test_data": "../data/Avazu/Avazu_x4/test.csv",
+ "train_data": "../data/Avazu/Avazu_x4/train.csv",
+ "use_hdf5": "True",
+ "use_low_rank_mixture": "False",
+ "valid_data": "../data/Avazu/Avazu_x4/valid.csv",
+ "verbose": "0",
+ "version": "pytorch"
+}
+2022-05-17 08:01:02,992 P30071 INFO Set up feature encoder...
+2022-05-17 08:01:02,992 P30071 INFO Load feature_map from json: ../data/Avazu/avazu_x4_3bbbc4c9/feature_map.json
+2022-05-17 08:01:02,993 P30071 INFO Loading data...
+2022-05-17 08:01:02,993 P30071 INFO Loading data from h5: ../data/Avazu/avazu_x4_3bbbc4c9/train.h5
+2022-05-17 08:01:05,999 P30071 INFO Loading data from h5: ../data/Avazu/avazu_x4_3bbbc4c9/valid.h5
+2022-05-17 08:01:06,425 P30071 INFO Train samples: total/32343172, pos/5492052, neg/26851120, ratio/16.98%, blocks/1
+2022-05-17 08:01:06,425 P30071 INFO Validation samples: total/4042897, pos/686507, neg/3356390, ratio/16.98%, blocks/1
+2022-05-17 08:01:06,425 P30071 INFO Loading train data done.
+2022-05-17 08:01:13,233 P30071 INFO Total number of parameters: 73385345.
+2022-05-17 08:01:13,234 P30071 INFO Start training: 3235 batches/epoch
+2022-05-17 08:01:13,234 P30071 INFO ************ Epoch=1 start ************
+2022-05-17 08:19:08,450 P30071 INFO [Metrics] AUC: 0.792978 - logloss: 0.371967
+2022-05-17 08:19:08,453 P30071 INFO Save best model: monitor(max): 0.421011
+2022-05-17 08:19:08,971 P30071 INFO --- 3235/3235 batches finished ---
+2022-05-17 08:19:09,016 P30071 INFO Train loss: 0.380317
+2022-05-17 08:19:09,016 P30071 INFO ************ Epoch=1 end ************
+2022-05-17 08:37:02,790 P30071 INFO [Metrics] AUC: 0.788575 - logloss: 0.383554
+2022-05-17 08:37:02,792 P30071 INFO Monitor(max) STOP: 0.405021 !
+2022-05-17 08:37:02,792 P30071 INFO Reduce learning rate on plateau: 0.000100
+2022-05-17 08:37:02,792 P30071 INFO --- 3235/3235 batches finished ---
+2022-05-17 08:37:02,829 P30071 INFO Train loss: 0.332264
+2022-05-17 08:37:02,829 P30071 INFO ************ Epoch=2 end ************
+2022-05-17 08:54:55,819 P30071 INFO [Metrics] AUC: 0.776121 - logloss: 0.427745
+2022-05-17 08:54:55,822 P30071 INFO Monitor(max) STOP: 0.348377 !
+2022-05-17 08:54:55,822 P30071 INFO Reduce learning rate on plateau: 0.000010
+2022-05-17 08:54:55,822 P30071 INFO Early stopping at epoch=3
+2022-05-17 08:54:55,822 P30071 INFO --- 3235/3235 batches finished ---
+2022-05-17 08:54:55,858 P30071 INFO Train loss: 0.291580
+2022-05-17 08:54:55,858 P30071 INFO Training finished.
+2022-05-17 08:54:55,859 P30071 INFO Load best model: /cache/FuxiCTR/benchmarks/Avazu/DCNv2_avazu_x4_001/avazu_x4_3bbbc4c9/DCNv2_avazu_x4_011_19794dc6.model
+2022-05-17 08:55:02,455 P30071 INFO ****** Validation evaluation ******
+2022-05-17 08:55:24,972 P30071 INFO [Metrics] AUC: 0.792978 - logloss: 0.371967
+2022-05-17 08:55:25,040 P30071 INFO ******** Test evaluation ********
+2022-05-17 08:55:25,040 P30071 INFO Loading data...
+2022-05-17 08:55:25,041 P30071 INFO Loading data from h5: ../data/Avazu/avazu_x4_3bbbc4c9/test.h5
+2022-05-17 08:55:25,405 P30071 INFO Test samples: total/4042898, pos/686507, neg/3356391, ratio/16.98%, blocks/1
+2022-05-17 08:55:25,405 P30071 INFO Loading test data done.
+2022-05-17 08:55:47,592 P30071 INFO [Metrics] AUC: 0.793146 - logloss: 0.371865
+
+```
diff --git a/ranking/ctr/DCNv2/DCNv2_avazu_x4_001/environments.txt b/ranking/ctr/DCNv2/DCNv2_avazu_x4_001/environments.txt
new file mode 100644
index 00000000..72292da3
--- /dev/null
+++ b/ranking/ctr/DCNv2/DCNv2_avazu_x4_001/environments.txt
@@ -0,0 +1,17 @@
+[Hardware]
+CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
+GPU: Tesla V100 32G
+RAM: 755G
+
+[Software]
+CUDA: 10.2
+python: 3.6.4
+pytorch: 1.0.0
+pandas: 0.22.0
+numpy: 1.19.2
+scipy: 1.5.4
+sklearn: 0.22.1
+pyyaml: 5.4.1
+h5py: 2.8.0
+tqdm: 4.60.0
+fuxictr: 1.1.1
diff --git a/ranking/ctr/DCNv2/DCNv2_avazu_x4_001/results.csv b/ranking/ctr/DCNv2/DCNv2_avazu_x4_001/results.csv
new file mode 100644
index 00000000..b3e3b5e9
--- /dev/null
+++ b/ranking/ctr/DCNv2/DCNv2_avazu_x4_001/results.csv
@@ -0,0 +1 @@
+ 20220517-085547,[command] python run_expid.py --version pytorch --config Avazu/DCNv2_avazu_x4_001/DCNv2_avazu_x4_tuner_config_02 --expid DCNv2_avazu_x4_011_19794dc6 --gpu 2,[exp_id] DCNv2_avazu_x4_011_19794dc6,[dataset_id] avazu_x4_3bbbc4c9,[train] N.A.,[val] AUC: 0.792978 - logloss: 0.371967,[test] AUC: 0.793146 - logloss: 0.371865
diff --git a/ranking/ctr/DCNv2/DCNv2_criteo_x1/README.md b/ranking/ctr/DCNv2/DCNv2_criteo_x1/README.md
index 4c574004..6bf810ab 100644
--- a/ranking/ctr/DCNv2/DCNv2_criteo_x1/README.md
+++ b/ranking/ctr/DCNv2/DCNv2_criteo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DCNv2 model on the Criteo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x1](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DCNv2](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DCNv2.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DCNv2](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DCNv2.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DCNv2/DCNv2_criteo_x4_001/DCNv2_criteo_x4_001_005_c2376d55.log b/ranking/ctr/DCNv2/DCNv2_criteo_x4_001/DCNv2_criteo_x4_001_005_c2376d55.log
new file mode 100644
index 00000000..4ab28459
--- /dev/null
+++ b/ranking/ctr/DCNv2/DCNv2_criteo_x4_001/DCNv2_criteo_x4_001_005_c2376d55.log
@@ -0,0 +1,159 @@
+2024-02-19 11:28:52,376 P3740343 INFO Params: {
+ "batch_norm": "True",
+ "batch_size": "10000",
+ "data_format": "csv",
+ "data_root": "../data/Criteo/",
+ "dataset_id": "criteo_x4_001_a5e05ce7",
+ "debug_mode": "False",
+ "dnn_activations": "relu",
+ "early_stop_patience": "2",
+ "embedding_dim": "16",
+ "embedding_regularizer": "1e-05",
+ "epochs": "100",
+ "eval_steps": "None",
+ "feature_cols": "[{'active': True, 'dtype': 'float', 'fill_na': 0, 'name': ['I1', 'I2', 'I3', 'I4', 'I5', 'I6', 'I7', 'I8', 'I9', 'I10', 'I11', 'I12', 'I13'], 'preprocess': 'convert_to_bucket', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'fill_na': '', 'name': ['C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10', 'C11', 'C12', 'C13', 'C14', 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21', 'C22', 'C23', 'C24', 'C25', 'C26'], 'type': 'categorical'}]",
+ "feature_config": "None",
+ "feature_specs": "None",
+ "gpu": "4",
+ "group_id": "None",
+ "label_col": "{'dtype': 'float', 'name': 'Label'}",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "low_rank": "32",
+ "metrics": "['AUC', 'logloss']",
+ "min_categr_count": "10",
+ "model": "DCNv2",
+ "model_id": "DCNv2_criteo_x4_001_005_c2376d55",
+ "model_root": "./checkpoints/",
+ "model_structure": "parallel",
+ "monitor": "AUC",
+ "monitor_mode": "max",
+ "net_dropout": "0.1",
+ "net_regularizer": "0",
+ "num_cross_layers": "3",
+ "num_experts": "4",
+ "num_workers": "3",
+ "optimizer": "adam",
+ "parallel_dnn_hidden_units": "[1000, 1000, 1000, 1000, 1000]",
+ "pickle_feature_encoder": "True",
+ "save_best_only": "True",
+ "seed": "2019",
+ "shuffle": "True",
+ "stacked_dnn_hidden_units": "[500, 500, 500]",
+ "task": "binary_classification",
+ "test_data": "../data/Criteo/Criteo_x4/test.csv",
+ "train_data": "../data/Criteo/Criteo_x4/train.csv",
+ "use_features": "None",
+ "use_low_rank_mixture": "False",
+ "valid_data": "../data/Criteo/Criteo_x4/valid.csv",
+ "verbose": "1"
+}
+2024-02-19 11:28:52,376 P3740343 INFO Set up feature processor...
+2024-02-19 11:28:52,377 P3740343 WARNING Skip rebuilding ../data/Criteo/criteo_x4_001_a5e05ce7/feature_map.json. Please delete it manually if rebuilding is required.
+2024-02-19 11:28:52,377 P3740343 INFO Load feature_map from json: ../data/Criteo/criteo_x4_001_a5e05ce7/feature_map.json
+2024-02-19 11:28:52,377 P3740343 INFO Set column index...
+2024-02-19 11:28:52,377 P3740343 INFO Feature specs: {
+ "C1": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 1446}",
+ "C10": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 39530}",
+ "C11": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 5131}",
+ "C12": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 156656}",
+ "C13": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 3176}",
+ "C14": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 28}",
+ "C15": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 11043}",
+ "C16": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 148913}",
+ "C17": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 12}",
+ "C18": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 4560}",
+ "C19": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 2003}",
+ "C2": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 554}",
+ "C20": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 5}",
+ "C21": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 154564}",
+ "C22": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 18}",
+ "C23": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 17}",
+ "C24": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 53031}",
+ "C25": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 82}",
+ "C26": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 40955}",
+ "C3": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 157339}",
+ "C4": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 117822}",
+ "C5": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 306}",
+ "C6": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 18}",
+ "C7": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 11882}",
+ "C8": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 630}",
+ "C9": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 5}",
+ "I1": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 44}",
+ "I10": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 6}",
+ "I11": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 27}",
+ "I12": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 37}",
+ "I13": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 72}",
+ "I2": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 99}",
+ "I3": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 122}",
+ "I4": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 41}",
+ "I5": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 220}",
+ "I6": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 112}",
+ "I7": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 80}",
+ "I8": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 69}",
+ "I9": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 92}"
+}
+2024-02-19 11:28:57,677 P3740343 INFO Total number of parameters: 20382577.
+2024-02-19 11:28:57,677 P3740343 INFO Loading datasets...
+2024-02-19 11:29:33,844 P3740343 INFO Train samples: total/36672493, blocks/1
+2024-02-19 11:29:38,337 P3740343 INFO Validation samples: total/4584062, blocks/1
+2024-02-19 11:29:38,337 P3740343 INFO Loading train and validation data done.
+2024-02-19 11:29:38,338 P3740343 INFO Start training: 3668 batches/epoch
+2024-02-19 11:29:38,338 P3740343 INFO ************ Epoch=1 start ************
+2024-02-19 11:34:45,532 P3740343 INFO Train loss: 0.458952
+2024-02-19 11:34:45,533 P3740343 INFO Evaluation @epoch 1 - batch 3668:
+2024-02-19 11:35:00,517 P3740343 INFO [Metrics] AUC: 0.806450
+2024-02-19 11:35:00,521 P3740343 INFO Save best model: monitor(max)=0.806450
+2024-02-19 11:35:00,679 P3740343 INFO ************ Epoch=1 end ************
+2024-02-19 11:40:08,747 P3740343 INFO Train loss: 0.451408
+2024-02-19 11:40:08,747 P3740343 INFO Evaluation @epoch 2 - batch 3668:
+2024-02-19 11:40:24,340 P3740343 INFO [Metrics] AUC: 0.809079
+2024-02-19 11:40:24,344 P3740343 INFO Save best model: monitor(max)=0.809079
+2024-02-19 11:40:24,518 P3740343 INFO ************ Epoch=2 end ************
+2024-02-19 11:45:34,026 P3740343 INFO Train loss: 0.449765
+2024-02-19 11:45:34,027 P3740343 INFO Evaluation @epoch 3 - batch 3668:
+2024-02-19 11:45:49,178 P3740343 INFO [Metrics] AUC: 0.810513
+2024-02-19 11:45:49,180 P3740343 INFO Save best model: monitor(max)=0.810513
+2024-02-19 11:45:49,358 P3740343 INFO ************ Epoch=3 end ************
+2024-02-19 11:50:55,398 P3740343 INFO Train loss: 0.448540
+2024-02-19 11:50:55,398 P3740343 INFO Evaluation @epoch 4 - batch 3668:
+2024-02-19 11:51:10,992 P3740343 INFO [Metrics] AUC: 0.811169
+2024-02-19 11:51:10,993 P3740343 INFO Save best model: monitor(max)=0.811169
+2024-02-19 11:51:11,171 P3740343 INFO ************ Epoch=4 end ************
+2024-02-19 11:56:19,561 P3740343 INFO Train loss: 0.447629
+2024-02-19 11:56:19,562 P3740343 INFO Evaluation @epoch 5 - batch 3668:
+2024-02-19 11:56:34,608 P3740343 INFO [Metrics] AUC: 0.811757
+2024-02-19 11:56:34,609 P3740343 INFO Save best model: monitor(max)=0.811757
+2024-02-19 11:56:34,790 P3740343 INFO ************ Epoch=5 end ************
+2024-02-19 12:01:40,413 P3740343 INFO Train loss: 0.446832
+2024-02-19 12:01:40,413 P3740343 INFO Evaluation @epoch 6 - batch 3668:
+2024-02-19 12:01:55,503 P3740343 INFO [Metrics] AUC: 0.811747
+2024-02-19 12:01:55,506 P3740343 INFO Monitor(max)=0.811747 STOP!
+2024-02-19 12:01:55,506 P3740343 INFO Reduce learning rate on plateau: 0.000100
+2024-02-19 12:01:55,559 P3740343 INFO ************ Epoch=6 end ************
+2024-02-19 12:07:00,817 P3740343 INFO Train loss: 0.435416
+2024-02-19 12:07:00,817 P3740343 INFO Evaluation @epoch 7 - batch 3668:
+2024-02-19 12:07:15,793 P3740343 INFO [Metrics] AUC: 0.814037
+2024-02-19 12:07:15,794 P3740343 INFO Save best model: monitor(max)=0.814037
+2024-02-19 12:07:15,963 P3740343 INFO ************ Epoch=7 end ************
+2024-02-19 12:12:25,653 P3740343 INFO Train loss: 0.431232
+2024-02-19 12:12:25,654 P3740343 INFO Evaluation @epoch 8 - batch 3668:
+2024-02-19 12:12:40,871 P3740343 INFO [Metrics] AUC: 0.813989
+2024-02-19 12:12:40,872 P3740343 INFO Monitor(max)=0.813989 STOP!
+2024-02-19 12:12:40,872 P3740343 INFO Reduce learning rate on plateau: 0.000010
+2024-02-19 12:12:40,923 P3740343 INFO ************ Epoch=8 end ************
+2024-02-19 12:17:46,267 P3740343 INFO Train loss: 0.427107
+2024-02-19 12:17:46,267 P3740343 INFO Evaluation @epoch 9 - batch 3668:
+2024-02-19 12:18:00,971 P3740343 INFO [Metrics] AUC: 0.813659
+2024-02-19 12:18:00,972 P3740343 INFO Monitor(max)=0.813659 STOP!
+2024-02-19 12:18:00,972 P3740343 INFO Reduce learning rate on plateau: 0.000001
+2024-02-19 12:18:00,972 P3740343 INFO ********* Epoch==9 early stop *********
+2024-02-19 12:18:01,025 P3740343 INFO Training finished.
+2024-02-19 12:18:01,026 P3740343 INFO Load best model: /cache/FuxiCTR/benchmark/checkpoints/criteo_x4_001_a5e05ce7/DCNv2_criteo_x4_001_005_c2376d55.model
+2024-02-19 12:18:01,096 P3740343 INFO ****** Validation evaluation ******
+2024-02-19 12:18:17,843 P3740343 INFO [Metrics] AUC: 0.814037 - logloss: 0.438087
+2024-02-19 12:18:17,963 P3740343 INFO ******** Test evaluation ********
+2024-02-19 12:18:17,964 P3740343 INFO Loading datasets...
+2024-02-19 12:18:22,506 P3740343 INFO Test samples: total/4584062, blocks/1
+2024-02-19 12:18:22,507 P3740343 INFO Loading test data done.
+2024-02-19 12:18:38,751 P3740343 INFO [Metrics] AUC: 0.814514 - logloss: 0.437631
diff --git a/ranking/ctr/DCNv2/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01.csv b/ranking/ctr/DCNv2/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01.csv
new file mode 100644
index 00000000..d1bebef5
--- /dev/null
+++ b/ranking/ctr/DCNv2/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01.csv
@@ -0,0 +1,16 @@
+ 20240219-121838,[command] python run_expid.py --config Criteo_x4/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01 --expid DCNv2_criteo_x4_001_005_c2376d55 --gpu 4,[exp_id] DCNv2_criteo_x4_001_005_c2376d55,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814037 - logloss: 0.438087,[test] AUC: 0.814514 - logloss: 0.437631
+ 20240219-121627,[command] python run_expid.py --config Criteo_x4/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01 --expid DCNv2_criteo_x4_001_006_c15183f0 --gpu 5,[exp_id] DCNv2_criteo_x4_001_006_c15183f0,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813820 - logloss: 0.438281,[test] AUC: 0.814352 - logloss: 0.437796
+ 20240219-122913,[command] python run_expid.py --config Criteo_x4/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01 --expid DCNv2_criteo_x4_001_007_d2d39ad9 --gpu 6,[exp_id] DCNv2_criteo_x4_001_007_d2d39ad9,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813793 - logloss: 0.438481,[test] AUC: 0.814209 - logloss: 0.438106
+ 20240219-123015,[command] python run_expid.py --config Criteo_x4/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01 --expid DCNv2_criteo_x4_001_004_3a036efb --gpu 3,[exp_id] DCNv2_criteo_x4_001_004_3a036efb,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813738 - logloss: 0.438254,[test] AUC: 0.814156 - logloss: 0.437873
+ 20240219-131219,[command] python run_expid.py --config Criteo_x4/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01 --expid DCNv2_criteo_x4_001_011_159c60bb --gpu 7,[exp_id] DCNv2_criteo_x4_001_011_159c60bb,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813719 - logloss: 0.438461,[test] AUC: 0.814145 - logloss: 0.438052
+ 20240219-122123,[command] python run_expid.py --config Criteo_x4/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01 --expid DCNv2_criteo_x4_001_008_37995540 --gpu 7,[exp_id] DCNv2_criteo_x4_001_008_37995540,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813714 - logloss: 0.438380,[test] AUC: 0.814114 - logloss: 0.438063
+ 20240219-131429,[command] python run_expid.py --config Criteo_x4/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01 --expid DCNv2_criteo_x4_001_012_547935d0 --gpu 1,[exp_id] DCNv2_criteo_x4_001_012_547935d0,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813647 - logloss: 0.438510,[test] AUC: 0.814106 - logloss: 0.438116
+ 20240219-130742,[command] python run_expid.py --config Criteo_x4/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01 --expid DCNv2_criteo_x4_001_010_46d11cb5 --gpu 4,[exp_id] DCNv2_criteo_x4_001_010_46d11cb5,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813360 - logloss: 0.438884,[test] AUC: 0.813915 - logloss: 0.438382
+ 20240219-122642,[command] python run_expid.py --config Criteo_x4/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01 --expid DCNv2_criteo_x4_001_001_b898c4c8 --gpu 0,[exp_id] DCNv2_criteo_x4_001_001_b898c4c8,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813570 - logloss: 0.438661,[test] AUC: 0.813902 - logloss: 0.438370
+ 20240219-132006,[command] python run_expid.py --config Criteo_x4/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01 --expid DCNv2_criteo_x4_001_013_38f2f74e --gpu 0,[exp_id] DCNv2_criteo_x4_001_013_38f2f74e,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813395 - logloss: 0.438908,[test] AUC: 0.813787 - logloss: 0.438528
+ 20240219-132253,[command] python run_expid.py --config Criteo_x4/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01 --expid DCNv2_criteo_x4_001_015_7fed2c5d --gpu 3,[exp_id] DCNv2_criteo_x4_001_015_7fed2c5d,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813320 - logloss: 0.438952,[test] AUC: 0.813727 - logloss: 0.438604
+ 20240219-122505,[command] python run_expid.py --config Criteo_x4/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01 --expid DCNv2_criteo_x4_001_002_29c70823 --gpu 1,[exp_id] DCNv2_criteo_x4_001_002_29c70823,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813073 - logloss: 0.439037,[test] AUC: 0.813572 - logloss: 0.438591
+ 20240219-131321,[command] python run_expid.py --config Criteo_x4/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01 --expid DCNv2_criteo_x4_001_009_a3ea8161 --gpu 5,[exp_id] DCNv2_criteo_x4_001_009_a3ea8161,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813049 - logloss: 0.439343,[test] AUC: 0.813543 - logloss: 0.438934
+ 20240219-123751,[command] python run_expid.py --config Criteo_x4/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01 --expid DCNv2_criteo_x4_001_003_ee267cb0 --gpu 2,[exp_id] DCNv2_criteo_x4_001_003_ee267cb0,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813195 - logloss: 0.439078,[test] AUC: 0.813540 - logloss: 0.438766
+ 20240219-132848,[command] python run_expid.py --config Criteo_x4/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01 --expid DCNv2_criteo_x4_001_016_4f31dc10 --gpu 2,[exp_id] DCNv2_criteo_x4_001_016_4f31dc10,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813184 - logloss: 0.439093,[test] AUC: 0.813486 - logloss: 0.438841
+ 20240219-132027,[command] python run_expid.py --config Criteo_x4/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01 --expid DCNv2_criteo_x4_001_014_a196cc4b --gpu 6,[exp_id] DCNv2_criteo_x4_001_014_a196cc4b,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812975 - logloss: 0.439308,[test] AUC: 0.813379 - logloss: 0.438959
diff --git a/ranking/ctr/DCNv2/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01.yaml b/ranking/ctr/DCNv2/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01.yaml
new file mode 100644
index 00000000..671bae72
--- /dev/null
+++ b/ranking/ctr/DCNv2/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01.yaml
@@ -0,0 +1,45 @@
+base_config: ../model_zoo/DCNv2/config/
+base_expid: DCNv2_default
+dataset_id: criteo_x4_001
+
+dataset_config:
+ criteo_x4_001:
+ data_format: csv
+ data_root: ../data/Criteo/
+ feature_cols:
+ - active: true
+ dtype: float
+ fill_na: 0
+ name: [I1, I2, I3, I4, I5, I6, I7, I8, I9, I10, I11, I12, I13]
+ preprocess: convert_to_bucket
+ type: categorical
+ - active: true
+ dtype: str
+ fill_na: ''
+ name: [C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16,
+ C17, C18, C19, C20, C21, C22, C23, C24, C25, C26]
+ type: categorical
+ label_col: {dtype: float, name: Label}
+ min_categr_count: 10
+ test_data: ../data/Criteo/Criteo_x4/test.csv
+ train_data: ../data/Criteo/Criteo_x4/train.csv
+ valid_data: ../data/Criteo/Criteo_x4/valid.csv
+
+tuner_space:
+ model_root: './checkpoints/'
+ embedding_dim: 16
+ embedding_regularizer: 1.e-5
+ parallel_dnn_hidden_units: [[1000, 1000, 1000, 1000, 1000]]
+ model_structure: parallel
+ use_low_rank_mixture: False
+ num_cross_layers: [2, 3, 4, 5]
+ net_dropout: [0.1, 0.2]
+ batch_norm: [True, False]
+ learning_rate: 1.e-3
+ batch_size: 10000
+ seed: 2019
+ early_stop_patience: 2
+ monitor: 'AUC'
+ monitor_mode: 'max'
+ metrics: [['AUC', 'logloss']]
+
diff --git a/ranking/ctr/DCNv2/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01/dataset_config.yaml b/ranking/ctr/DCNv2/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01/dataset_config.yaml
new file mode 100644
index 00000000..73e334c1
--- /dev/null
+++ b/ranking/ctr/DCNv2/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01/dataset_config.yaml
@@ -0,0 +1,21 @@
+criteo_x4_001_a5e05ce7:
+ data_format: csv
+ data_root: ../data/Criteo/
+ feature_cols:
+ - active: true
+ dtype: float
+ fill_na: 0
+ name: [I1, I2, I3, I4, I5, I6, I7, I8, I9, I10, I11, I12, I13]
+ preprocess: convert_to_bucket
+ type: categorical
+ - active: true
+ dtype: str
+ fill_na: ''
+ name: [C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16,
+ C17, C18, C19, C20, C21, C22, C23, C24, C25, C26]
+ type: categorical
+ label_col: {dtype: float, name: Label}
+ min_categr_count: 10
+ test_data: ../data/Criteo/Criteo_x4/test.csv
+ train_data: ../data/Criteo/Criteo_x4/train.csv
+ valid_data: ../data/Criteo/Criteo_x4/valid.csv
diff --git a/ranking/ctr/DCNv2/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01/model_config.yaml b/ranking/ctr/DCNv2/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01/model_config.yaml
new file mode 100644
index 00000000..9a1189d7
--- /dev/null
+++ b/ranking/ctr/DCNv2/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01/model_config.yaml
@@ -0,0 +1,624 @@
+DCNv2_criteo_x4_001_001_b898c4c8:
+ batch_norm: true
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_activations: relu
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ low_rank: 32
+ metrics: [AUC, logloss]
+ model: DCNv2
+ model_root: ./checkpoints/
+ model_structure: parallel
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.1
+ net_regularizer: 0
+ num_cross_layers: 2
+ num_experts: 4
+ num_workers: 3
+ optimizer: adam
+ parallel_dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ stacked_dnn_hidden_units: [500, 500, 500]
+ task: binary_classification
+ use_features: null
+ use_low_rank_mixture: false
+ verbose: 1
+DCNv2_criteo_x4_001_002_29c70823:
+ batch_norm: false
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_activations: relu
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ low_rank: 32
+ metrics: [AUC, logloss]
+ model: DCNv2
+ model_root: ./checkpoints/
+ model_structure: parallel
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.1
+ net_regularizer: 0
+ num_cross_layers: 2
+ num_experts: 4
+ num_workers: 3
+ optimizer: adam
+ parallel_dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ stacked_dnn_hidden_units: [500, 500, 500]
+ task: binary_classification
+ use_features: null
+ use_low_rank_mixture: false
+ verbose: 1
+DCNv2_criteo_x4_001_003_ee267cb0:
+ batch_norm: true
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_activations: relu
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ low_rank: 32
+ metrics: [AUC, logloss]
+ model: DCNv2
+ model_root: ./checkpoints/
+ model_structure: parallel
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.2
+ net_regularizer: 0
+ num_cross_layers: 2
+ num_experts: 4
+ num_workers: 3
+ optimizer: adam
+ parallel_dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ stacked_dnn_hidden_units: [500, 500, 500]
+ task: binary_classification
+ use_features: null
+ use_low_rank_mixture: false
+ verbose: 1
+DCNv2_criteo_x4_001_004_3a036efb:
+ batch_norm: false
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_activations: relu
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ low_rank: 32
+ metrics: [AUC, logloss]
+ model: DCNv2
+ model_root: ./checkpoints/
+ model_structure: parallel
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.2
+ net_regularizer: 0
+ num_cross_layers: 2
+ num_experts: 4
+ num_workers: 3
+ optimizer: adam
+ parallel_dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ stacked_dnn_hidden_units: [500, 500, 500]
+ task: binary_classification
+ use_features: null
+ use_low_rank_mixture: false
+ verbose: 1
+DCNv2_criteo_x4_001_005_c2376d55:
+ batch_norm: true
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_activations: relu
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ low_rank: 32
+ metrics: [AUC, logloss]
+ model: DCNv2
+ model_root: ./checkpoints/
+ model_structure: parallel
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.1
+ net_regularizer: 0
+ num_cross_layers: 3
+ num_experts: 4
+ num_workers: 3
+ optimizer: adam
+ parallel_dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ stacked_dnn_hidden_units: [500, 500, 500]
+ task: binary_classification
+ use_features: null
+ use_low_rank_mixture: false
+ verbose: 1
+DCNv2_criteo_x4_001_006_c15183f0:
+ batch_norm: false
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_activations: relu
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ low_rank: 32
+ metrics: [AUC, logloss]
+ model: DCNv2
+ model_root: ./checkpoints/
+ model_structure: parallel
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.1
+ net_regularizer: 0
+ num_cross_layers: 3
+ num_experts: 4
+ num_workers: 3
+ optimizer: adam
+ parallel_dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ stacked_dnn_hidden_units: [500, 500, 500]
+ task: binary_classification
+ use_features: null
+ use_low_rank_mixture: false
+ verbose: 1
+DCNv2_criteo_x4_001_007_d2d39ad9:
+ batch_norm: true
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_activations: relu
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ low_rank: 32
+ metrics: [AUC, logloss]
+ model: DCNv2
+ model_root: ./checkpoints/
+ model_structure: parallel
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.2
+ net_regularizer: 0
+ num_cross_layers: 3
+ num_experts: 4
+ num_workers: 3
+ optimizer: adam
+ parallel_dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ stacked_dnn_hidden_units: [500, 500, 500]
+ task: binary_classification
+ use_features: null
+ use_low_rank_mixture: false
+ verbose: 1
+DCNv2_criteo_x4_001_008_37995540:
+ batch_norm: false
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_activations: relu
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ low_rank: 32
+ metrics: [AUC, logloss]
+ model: DCNv2
+ model_root: ./checkpoints/
+ model_structure: parallel
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.2
+ net_regularizer: 0
+ num_cross_layers: 3
+ num_experts: 4
+ num_workers: 3
+ optimizer: adam
+ parallel_dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ stacked_dnn_hidden_units: [500, 500, 500]
+ task: binary_classification
+ use_features: null
+ use_low_rank_mixture: false
+ verbose: 1
+DCNv2_criteo_x4_001_009_a3ea8161:
+ batch_norm: true
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_activations: relu
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ low_rank: 32
+ metrics: [AUC, logloss]
+ model: DCNv2
+ model_root: ./checkpoints/
+ model_structure: parallel
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.1
+ net_regularizer: 0
+ num_cross_layers: 4
+ num_experts: 4
+ num_workers: 3
+ optimizer: adam
+ parallel_dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ stacked_dnn_hidden_units: [500, 500, 500]
+ task: binary_classification
+ use_features: null
+ use_low_rank_mixture: false
+ verbose: 1
+DCNv2_criteo_x4_001_010_46d11cb5:
+ batch_norm: false
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_activations: relu
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ low_rank: 32
+ metrics: [AUC, logloss]
+ model: DCNv2
+ model_root: ./checkpoints/
+ model_structure: parallel
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.1
+ net_regularizer: 0
+ num_cross_layers: 4
+ num_experts: 4
+ num_workers: 3
+ optimizer: adam
+ parallel_dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ stacked_dnn_hidden_units: [500, 500, 500]
+ task: binary_classification
+ use_features: null
+ use_low_rank_mixture: false
+ verbose: 1
+DCNv2_criteo_x4_001_011_159c60bb:
+ batch_norm: true
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_activations: relu
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ low_rank: 32
+ metrics: [AUC, logloss]
+ model: DCNv2
+ model_root: ./checkpoints/
+ model_structure: parallel
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.2
+ net_regularizer: 0
+ num_cross_layers: 4
+ num_experts: 4
+ num_workers: 3
+ optimizer: adam
+ parallel_dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ stacked_dnn_hidden_units: [500, 500, 500]
+ task: binary_classification
+ use_features: null
+ use_low_rank_mixture: false
+ verbose: 1
+DCNv2_criteo_x4_001_012_547935d0:
+ batch_norm: false
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_activations: relu
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ low_rank: 32
+ metrics: [AUC, logloss]
+ model: DCNv2
+ model_root: ./checkpoints/
+ model_structure: parallel
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.2
+ net_regularizer: 0
+ num_cross_layers: 4
+ num_experts: 4
+ num_workers: 3
+ optimizer: adam
+ parallel_dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ stacked_dnn_hidden_units: [500, 500, 500]
+ task: binary_classification
+ use_features: null
+ use_low_rank_mixture: false
+ verbose: 1
+DCNv2_criteo_x4_001_013_38f2f74e:
+ batch_norm: true
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_activations: relu
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ low_rank: 32
+ metrics: [AUC, logloss]
+ model: DCNv2
+ model_root: ./checkpoints/
+ model_structure: parallel
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.1
+ net_regularizer: 0
+ num_cross_layers: 5
+ num_experts: 4
+ num_workers: 3
+ optimizer: adam
+ parallel_dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ stacked_dnn_hidden_units: [500, 500, 500]
+ task: binary_classification
+ use_features: null
+ use_low_rank_mixture: false
+ verbose: 1
+DCNv2_criteo_x4_001_014_a196cc4b:
+ batch_norm: false
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_activations: relu
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ low_rank: 32
+ metrics: [AUC, logloss]
+ model: DCNv2
+ model_root: ./checkpoints/
+ model_structure: parallel
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.1
+ net_regularizer: 0
+ num_cross_layers: 5
+ num_experts: 4
+ num_workers: 3
+ optimizer: adam
+ parallel_dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ stacked_dnn_hidden_units: [500, 500, 500]
+ task: binary_classification
+ use_features: null
+ use_low_rank_mixture: false
+ verbose: 1
+DCNv2_criteo_x4_001_015_7fed2c5d:
+ batch_norm: true
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_activations: relu
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ low_rank: 32
+ metrics: [AUC, logloss]
+ model: DCNv2
+ model_root: ./checkpoints/
+ model_structure: parallel
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.2
+ net_regularizer: 0
+ num_cross_layers: 5
+ num_experts: 4
+ num_workers: 3
+ optimizer: adam
+ parallel_dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ stacked_dnn_hidden_units: [500, 500, 500]
+ task: binary_classification
+ use_features: null
+ use_low_rank_mixture: false
+ verbose: 1
+DCNv2_criteo_x4_001_016_4f31dc10:
+ batch_norm: false
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_activations: relu
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ low_rank: 32
+ metrics: [AUC, logloss]
+ model: DCNv2
+ model_root: ./checkpoints/
+ model_structure: parallel
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.2
+ net_regularizer: 0
+ num_cross_layers: 5
+ num_experts: 4
+ num_workers: 3
+ optimizer: adam
+ parallel_dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ stacked_dnn_hidden_units: [500, 500, 500]
+ task: binary_classification
+ use_features: null
+ use_low_rank_mixture: false
+ verbose: 1
diff --git a/ranking/ctr/DCNv2/DCNv2_criteo_x4_001/README.md b/ranking/ctr/DCNv2/DCNv2_criteo_x4_001/README.md
new file mode 100644
index 00000000..4043325a
--- /dev/null
+++ b/ranking/ctr/DCNv2/DCNv2_criteo_x4_001/README.md
@@ -0,0 +1,234 @@
+## DCNv2_criteo_x4_001
+
+A hands-on guide to run the DCNv2 model on the Criteo_x4 dataset.
+
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
+
+
+| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
+|:-----------------------------:|:-----------:|:--------:|:--------:|-------|
+### Environments
++ Hardware
+
+ ```python
+ CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
+ GPU: Tesla V100 32G
+ RAM: 755G
+
+ ```
+
++ Software
+
+ ```python
+ CUDA: 10.2
+ python: 3.7.10
+ pytorch: 1.10.2+cu102
+ pandas: 1.1.5
+ numpy: 1.19.5
+ scipy: 1.5.2
+ sklearn: 0.22.1
+ pyyaml: 6.0.1
+ h5py: 2.8.0
+ tqdm: 4.64.0
+ keras_preprocessing: 1.1.2
+ fuxictr: 2.2.0
+ ```
+
+### Dataset
+Please refer to [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4) to get the dataset details.
+
+### Code
+
+We use the [DCNv2](https://github.com/reczoo/FuxiCTR/tree/v2.2.0/model_zoo/DCNv2) model code from [FuxiCTR-v2.2.0](https://github.com/reczoo/FuxiCTR/tree/v2.2.0) for this experiment.
+
+Running steps:
+
+1. Download [FuxiCTR-v2.2.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.2.0.zip) and install all the dependencies listed in the [environments](#environments).
+
+ ```bash
+ pip uninstall fuxictr
+ pip install fuxictr==2.2.0
+ ```
+
+2. Create a data directory and put the downloaded data files in `../data/Criteo/Criteo_x4`.
+
+3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [DCNv2_criteo_x4_tuner_config_01](./DCNv2_criteo_x4_tuner_config_01). Please make sure that the data paths in `dataset_config.yaml` are correctly set.
+
+4. Run the following script to start training and evaluation.
+
+ ```bash
+ cd FuxiCTR/model_zoo/DCNv2
+   nohup python run_expid.py --config YOUR_PATH/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01 --expid DCNv2_criteo_x4_001_005_c2376d55 --gpu 0 > run.log &
+ tail -f run.log
+ ```
+
+### Results
+
+| AUC | logloss |
+|:--------------------:|:--------------------:|
+| 0.814514 | 0.437631 |
+
+
+### Logs
+```python
+2024-02-19 11:28:52,376 P3740343 INFO Params: {
+ "batch_norm": "True",
+ "batch_size": "10000",
+ "data_format": "csv",
+ "data_root": "../data/Criteo/",
+ "dataset_id": "criteo_x4_001_a5e05ce7",
+ "debug_mode": "False",
+ "dnn_activations": "relu",
+ "early_stop_patience": "2",
+ "embedding_dim": "16",
+ "embedding_regularizer": "1e-05",
+ "epochs": "100",
+ "eval_steps": "None",
+ "feature_cols": "[{'active': True, 'dtype': 'float', 'fill_na': 0, 'name': ['I1', 'I2', 'I3', 'I4', 'I5', 'I6', 'I7', 'I8', 'I9', 'I10', 'I11', 'I12', 'I13'], 'preprocess': 'convert_to_bucket', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'fill_na': '', 'name': ['C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10', 'C11', 'C12', 'C13', 'C14', 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21', 'C22', 'C23', 'C24', 'C25', 'C26'], 'type': 'categorical'}]",
+ "feature_config": "None",
+ "feature_specs": "None",
+ "gpu": "4",
+ "group_id": "None",
+ "label_col": "{'dtype': 'float', 'name': 'Label'}",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "low_rank": "32",
+ "metrics": "['AUC', 'logloss']",
+ "min_categr_count": "10",
+ "model": "DCNv2",
+ "model_id": "DCNv2_criteo_x4_001_005_c2376d55",
+ "model_root": "./checkpoints/",
+ "model_structure": "parallel",
+ "monitor": "AUC",
+ "monitor_mode": "max",
+ "net_dropout": "0.1",
+ "net_regularizer": "0",
+ "num_cross_layers": "3",
+ "num_experts": "4",
+ "num_workers": "3",
+ "optimizer": "adam",
+ "parallel_dnn_hidden_units": "[1000, 1000, 1000, 1000, 1000]",
+ "pickle_feature_encoder": "True",
+ "save_best_only": "True",
+ "seed": "2019",
+ "shuffle": "True",
+ "stacked_dnn_hidden_units": "[500, 500, 500]",
+ "task": "binary_classification",
+ "test_data": "../data/Criteo/Criteo_x4/test.csv",
+ "train_data": "../data/Criteo/Criteo_x4/train.csv",
+ "use_features": "None",
+ "use_low_rank_mixture": "False",
+ "valid_data": "../data/Criteo/Criteo_x4/valid.csv",
+ "verbose": "1"
+}
+2024-02-19 11:28:52,376 P3740343 INFO Set up feature processor...
+2024-02-19 11:28:52,377 P3740343 WARNING Skip rebuilding ../data/Criteo/criteo_x4_001_a5e05ce7/feature_map.json. Please delete it manually if rebuilding is required.
+2024-02-19 11:28:52,377 P3740343 INFO Load feature_map from json: ../data/Criteo/criteo_x4_001_a5e05ce7/feature_map.json
+2024-02-19 11:28:52,377 P3740343 INFO Set column index...
+2024-02-19 11:28:52,377 P3740343 INFO Feature specs: {
+ "C1": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 1446}",
+ "C10": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 39530}",
+ "C11": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 5131}",
+ "C12": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 156656}",
+ "C13": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 3176}",
+ "C14": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 28}",
+ "C15": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 11043}",
+ "C16": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 148913}",
+ "C17": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 12}",
+ "C18": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 4560}",
+ "C19": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 2003}",
+ "C2": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 554}",
+ "C20": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 5}",
+ "C21": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 154564}",
+ "C22": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 18}",
+ "C23": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 17}",
+ "C24": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 53031}",
+ "C25": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 82}",
+ "C26": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 40955}",
+ "C3": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 157339}",
+ "C4": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 117822}",
+ "C5": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 306}",
+ "C6": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 18}",
+ "C7": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 11882}",
+ "C8": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 630}",
+ "C9": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 5}",
+ "I1": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 44}",
+ "I10": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 6}",
+ "I11": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 27}",
+ "I12": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 37}",
+ "I13": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 72}",
+ "I2": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 99}",
+ "I3": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 122}",
+ "I4": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 41}",
+ "I5": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 220}",
+ "I6": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 112}",
+ "I7": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 80}",
+ "I8": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 69}",
+ "I9": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 92}"
+}
+2024-02-19 11:28:57,677 P3740343 INFO Total number of parameters: 20382577.
+2024-02-19 11:28:57,677 P3740343 INFO Loading datasets...
+2024-02-19 11:29:33,844 P3740343 INFO Train samples: total/36672493, blocks/1
+2024-02-19 11:29:38,337 P3740343 INFO Validation samples: total/4584062, blocks/1
+2024-02-19 11:29:38,337 P3740343 INFO Loading train and validation data done.
+2024-02-19 11:29:38,338 P3740343 INFO Start training: 3668 batches/epoch
+2024-02-19 11:29:38,338 P3740343 INFO ************ Epoch=1 start ************
+2024-02-19 11:34:45,532 P3740343 INFO Train loss: 0.458952
+2024-02-19 11:34:45,533 P3740343 INFO Evaluation @epoch 1 - batch 3668:
+2024-02-19 11:35:00,517 P3740343 INFO [Metrics] AUC: 0.806450
+2024-02-19 11:35:00,521 P3740343 INFO Save best model: monitor(max)=0.806450
+2024-02-19 11:35:00,679 P3740343 INFO ************ Epoch=1 end ************
+2024-02-19 11:40:08,747 P3740343 INFO Train loss: 0.451408
+2024-02-19 11:40:08,747 P3740343 INFO Evaluation @epoch 2 - batch 3668:
+2024-02-19 11:40:24,340 P3740343 INFO [Metrics] AUC: 0.809079
+2024-02-19 11:40:24,344 P3740343 INFO Save best model: monitor(max)=0.809079
+2024-02-19 11:40:24,518 P3740343 INFO ************ Epoch=2 end ************
+2024-02-19 11:45:34,026 P3740343 INFO Train loss: 0.449765
+2024-02-19 11:45:34,027 P3740343 INFO Evaluation @epoch 3 - batch 3668:
+2024-02-19 11:45:49,178 P3740343 INFO [Metrics] AUC: 0.810513
+2024-02-19 11:45:49,180 P3740343 INFO Save best model: monitor(max)=0.810513
+2024-02-19 11:45:49,358 P3740343 INFO ************ Epoch=3 end ************
+2024-02-19 11:50:55,398 P3740343 INFO Train loss: 0.448540
+2024-02-19 11:50:55,398 P3740343 INFO Evaluation @epoch 4 - batch 3668:
+2024-02-19 11:51:10,992 P3740343 INFO [Metrics] AUC: 0.811169
+2024-02-19 11:51:10,993 P3740343 INFO Save best model: monitor(max)=0.811169
+2024-02-19 11:51:11,171 P3740343 INFO ************ Epoch=4 end ************
+2024-02-19 11:56:19,561 P3740343 INFO Train loss: 0.447629
+2024-02-19 11:56:19,562 P3740343 INFO Evaluation @epoch 5 - batch 3668:
+2024-02-19 11:56:34,608 P3740343 INFO [Metrics] AUC: 0.811757
+2024-02-19 11:56:34,609 P3740343 INFO Save best model: monitor(max)=0.811757
+2024-02-19 11:56:34,790 P3740343 INFO ************ Epoch=5 end ************
+2024-02-19 12:01:40,413 P3740343 INFO Train loss: 0.446832
+2024-02-19 12:01:40,413 P3740343 INFO Evaluation @epoch 6 - batch 3668:
+2024-02-19 12:01:55,503 P3740343 INFO [Metrics] AUC: 0.811747
+2024-02-19 12:01:55,506 P3740343 INFO Monitor(max)=0.811747 STOP!
+2024-02-19 12:01:55,506 P3740343 INFO Reduce learning rate on plateau: 0.000100
+2024-02-19 12:01:55,559 P3740343 INFO ************ Epoch=6 end ************
+2024-02-19 12:07:00,817 P3740343 INFO Train loss: 0.435416
+2024-02-19 12:07:00,817 P3740343 INFO Evaluation @epoch 7 - batch 3668:
+2024-02-19 12:07:15,793 P3740343 INFO [Metrics] AUC: 0.814037
+2024-02-19 12:07:15,794 P3740343 INFO Save best model: monitor(max)=0.814037
+2024-02-19 12:07:15,963 P3740343 INFO ************ Epoch=7 end ************
+2024-02-19 12:12:25,653 P3740343 INFO Train loss: 0.431232
+2024-02-19 12:12:25,654 P3740343 INFO Evaluation @epoch 8 - batch 3668:
+2024-02-19 12:12:40,871 P3740343 INFO [Metrics] AUC: 0.813989
+2024-02-19 12:12:40,872 P3740343 INFO Monitor(max)=0.813989 STOP!
+2024-02-19 12:12:40,872 P3740343 INFO Reduce learning rate on plateau: 0.000010
+2024-02-19 12:12:40,923 P3740343 INFO ************ Epoch=8 end ************
+2024-02-19 12:17:46,267 P3740343 INFO Train loss: 0.427107
+2024-02-19 12:17:46,267 P3740343 INFO Evaluation @epoch 9 - batch 3668:
+2024-02-19 12:18:00,971 P3740343 INFO [Metrics] AUC: 0.813659
+2024-02-19 12:18:00,972 P3740343 INFO Monitor(max)=0.813659 STOP!
+2024-02-19 12:18:00,972 P3740343 INFO Reduce learning rate on plateau: 0.000001
+2024-02-19 12:18:00,972 P3740343 INFO ********* Epoch==9 early stop *********
+2024-02-19 12:18:01,025 P3740343 INFO Training finished.
+2024-02-19 12:18:01,026 P3740343 INFO Load best model: /cache/FuxiCTR/benchmark/checkpoints/criteo_x4_001_a5e05ce7/DCNv2_criteo_x4_001_005_c2376d55.model
+2024-02-19 12:18:01,096 P3740343 INFO ****** Validation evaluation ******
+2024-02-19 12:18:17,843 P3740343 INFO [Metrics] AUC: 0.814037 - logloss: 0.438087
+2024-02-19 12:18:17,963 P3740343 INFO ******** Test evaluation ********
+2024-02-19 12:18:17,964 P3740343 INFO Loading datasets...
+2024-02-19 12:18:22,506 P3740343 INFO Test samples: total/4584062, blocks/1
+2024-02-19 12:18:22,507 P3740343 INFO Loading test data done.
+2024-02-19 12:18:38,751 P3740343 INFO [Metrics] AUC: 0.814514 - logloss: 0.437631
+
+```
diff --git a/ranking/ctr/DCNv2/DCNv2_criteo_x4_001/environments.txt b/ranking/ctr/DCNv2/DCNv2_criteo_x4_001/environments.txt
new file mode 100644
index 00000000..b4567ace
--- /dev/null
+++ b/ranking/ctr/DCNv2/DCNv2_criteo_x4_001/environments.txt
@@ -0,0 +1,18 @@
+[Hardware]
+CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
+GPU: Tesla V100 32G
+RAM: 755G
+
+[Software]
+CUDA: 10.2
+python: 3.7.10
+pytorch: 1.10.2+cu102
+pandas: 1.1.5
+numpy: 1.19.5
+scipy: 1.5.2
+sklearn: 0.22.1
+pyyaml: 6.0.1
+h5py: 2.8.0
+tqdm: 4.64.0
+keras_preprocessing: 1.1.2
+fuxictr: 2.2.0
\ No newline at end of file
diff --git a/ranking/ctr/DCNv2/DCNv2_criteo_x4_001/results.csv b/ranking/ctr/DCNv2/DCNv2_criteo_x4_001/results.csv
new file mode 100644
index 00000000..9802f310
--- /dev/null
+++ b/ranking/ctr/DCNv2/DCNv2_criteo_x4_001/results.csv
@@ -0,0 +1 @@
+ 20240219-121838,[command] python run_expid.py --config Criteo_x4/DCNv2_criteo_x4_001/DCNv2_criteo_x4_tuner_config_01 --expid DCNv2_criteo_x4_001_005_c2376d55 --gpu 4,[exp_id] DCNv2_criteo_x4_001_005_c2376d55,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814037 - logloss: 0.438087,[test] AUC: 0.814514 - logloss: 0.437631
diff --git a/ranking/ctr/DCNv2/DCNv2_frappe_x1/README.md b/ranking/ctr/DCNv2/DCNv2_frappe_x1/README.md
index 005a9d25..0e1bd382 100644
--- a/ranking/ctr/DCNv2/DCNv2_frappe_x1/README.md
+++ b/ranking/ctr/DCNv2/DCNv2_frappe_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DCNv2 model on the Frappe_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
@@ -36,15 +36,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
### Dataset
-Dataset ID: [Frappe_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Frappe/README.md#Frappe_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Frappe_x1](https://github.com/reczoo/Datasets/tree/main/Frappe/Frappe_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DCNv2](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DCNv2.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DCNv2](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DCNv2.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DCNv2/DCNv2_kuaivideo_x1/README.md b/ranking/ctr/DCNv2/DCNv2_kuaivideo_x1/README.md
index 1011d16b..05aeb431 100644
--- a/ranking/ctr/DCNv2/DCNv2_kuaivideo_x1/README.md
+++ b/ranking/ctr/DCNv2/DCNv2_kuaivideo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DCNv2 model on the KuaiVideo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [KuaiVideo_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/KuaiShou#KuaiVideo_x1) to get data ready.
+Please refer to [KuaiVideo_x1](https://github.com/reczoo/Datasets/tree/main/KuaiShou/KuaiVideo_x1) to get the dataset details.
### Code
-We use the [DCNv2](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/DCNv2) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [DCNv2](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/DCNv2) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/DCNv2/DCNv2_microvideo1.7m_x1/README.md b/ranking/ctr/DCNv2/DCNv2_microvideo1.7m_x1/README.md
index 3f4928f7..47ee77c3 100644
--- a/ranking/ctr/DCNv2/DCNv2_microvideo1.7m_x1/README.md
+++ b/ranking/ctr/DCNv2/DCNv2_microvideo1.7m_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DCNv2 model on the MicroVideo1.7M_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [MicroVideo1.7M_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/MicroVideo1.7M#MicroVideo17M_x1) to get data ready.
+Please refer to [MicroVideo1.7M_x1](https://github.com/reczoo/Datasets/tree/main/MicroVideo/MicroVideo1.7M_x1) to get the dataset details.
### Code
-We use the [DCNv2](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/DCNv2) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [DCNv2](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/DCNv2) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/DCNv2/DCNv2_movielenslatest_x1/README.md b/ranking/ctr/DCNv2/DCNv2_movielenslatest_x1/README.md
index 380f1586..ff37af3b 100644
--- a/ranking/ctr/DCNv2/DCNv2_movielenslatest_x1/README.md
+++ b/ranking/ctr/DCNv2/DCNv2_movielenslatest_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DCNv2 model on the Movielenslatest_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
@@ -36,15 +36,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
### Dataset
-Dataset ID: [Movielenslatest_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/MovieLens/README.md#Movielenslatest_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [MovielensLatest_x1](https://github.com/reczoo/Datasets/tree/main/MovieLens/MovielensLatest_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DCNv2](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DCNv2.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DCNv2](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DCNv2.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DCNv2/DCNv2_taobaoad_x1/README.md b/ranking/ctr/DCNv2/DCNv2_taobaoad_x1/README.md
index 44b3c6d4..d8a7f68c 100644
--- a/ranking/ctr/DCNv2/DCNv2_taobaoad_x1/README.md
+++ b/ranking/ctr/DCNv2/DCNv2_taobaoad_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DCNv2 model on the TaobaoAd_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [TaobaoAd_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/Taobao#TaobaoAd_x1) to get data ready.
+Please refer to [TaobaoAd_x1](https://github.com/reczoo/Datasets/tree/main/Taobao/TaobaoAd_x1) to get the dataset details.
### Code
-We use the [DCNv2](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/DCNv2) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [DCNv2](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/DCNv2) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/DESTINE/DESTINE_avazu_x1/README.md b/ranking/ctr/DESTINE/DESTINE_avazu_x1/README.md
index 59938048..d8b93eec 100644
--- a/ranking/ctr/DESTINE/DESTINE_avazu_x1/README.md
+++ b/ranking/ctr/DESTINE/DESTINE_avazu_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DESTINE model on the Avazu_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
@@ -40,11 +40,11 @@ Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pre
### Code
-We use [FuxiCTR-v1.2.2](https://github.com/xue-pai/FuxiCTR/tree/v1.2.2) for this experiment. See the model code: [DESTINE](https://github.com/xue-pai/FuxiCTR/blob/v1.2.2/fuxictr/pytorch/models/DESTINE.py).
+We use [FuxiCTR-v1.2.2](https://github.com/reczoo/FuxiCTR/tree/v1.2.2) for this experiment. See the model code: [DESTINE](https://github.com/reczoo/FuxiCTR/blob/v1.2.2/fuxictr/pytorch/models/DESTINE.py).
Running steps:
-1. Download [FuxiCTR-v1.2.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [fuxictr_version.py](./fuxictr_version.py#L3) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.2.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [fuxictr_version.py](./fuxictr_version.py#L3) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DESTINE/DESTINE_criteo_x1/README.md b/ranking/ctr/DESTINE/DESTINE_criteo_x1/README.md
index f08f44da..3c0a81a9 100644
--- a/ranking/ctr/DESTINE/DESTINE_criteo_x1/README.md
+++ b/ranking/ctr/DESTINE/DESTINE_criteo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DESTINE model on the Criteo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
@@ -40,11 +40,11 @@ Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pr
### Code
-We use [FuxiCTR-v1.2.2](https://github.com/xue-pai/FuxiCTR/tree/v1.2.2) for this experiment. See the model code: [DESTINE](https://github.com/xue-pai/FuxiCTR/blob/v1.2.2/fuxictr/pytorch/models/DESTINE.py).
+We use [FuxiCTR-v1.2.2](https://github.com/reczoo/FuxiCTR/tree/v1.2.2) for this experiment. See the model code: [DESTINE](https://github.com/reczoo/FuxiCTR/blob/v1.2.2/fuxictr/pytorch/models/DESTINE.py).
Running steps:
-1. Download [FuxiCTR-v1.2.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [fuxictr_version.py](./fuxictr_version.py#L3) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.2.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [fuxictr_version.py](./fuxictr_version.py#L3) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DESTINE/DESTINE_frappe_x1/README.md b/ranking/ctr/DESTINE/DESTINE_frappe_x1/README.md
index 2fb70419..02d98ba4 100644
--- a/ranking/ctr/DESTINE/DESTINE_frappe_x1/README.md
+++ b/ranking/ctr/DESTINE/DESTINE_frappe_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DESTINE\DESTINE_frappe_x1 model on the dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
@@ -40,11 +40,11 @@ Dataset ID: [](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/
### Code
-We use [FuxiCTR-v1.2.2](https://github.com/xue-pai/FuxiCTR/tree/v1.2.2) for this experiment. See the model code: [DESTINE\DESTINE_frappe_x1](https://github.com/xue-pai/FuxiCTR/blob/v1.2.2/fuxictr/pytorch/models/DESTINE\DESTINE_frappe_x1.py).
+We use [FuxiCTR-v1.2.2](https://github.com/reczoo/FuxiCTR/tree/v1.2.2) for this experiment. See the model code: [DESTINE](https://github.com/reczoo/FuxiCTR/blob/v1.2.2/fuxictr/pytorch/models/DESTINE.py).
Running steps:
-1. Download [FuxiCTR-v1.2.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [fuxictr_version.py](./fuxictr_version.py#L3) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.2.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [fuxictr_version.py](./fuxictr_version.py#L3) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DESTINE/DESTINE_movielenslatest_x1/README.md b/ranking/ctr/DESTINE/DESTINE_movielenslatest_x1/README.md
index 0a5fe48c..8dc7d097 100644
--- a/ranking/ctr/DESTINE/DESTINE_movielenslatest_x1/README.md
+++ b/ranking/ctr/DESTINE/DESTINE_movielenslatest_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DESTINE model on the MovielensLatest_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
@@ -40,11 +40,11 @@ Dataset ID: [MovielensLatest_x1](https://github.com/openbenchmark/BARS/blob/mast
### Code
-We use [FuxiCTR-v1.2.2](https://github.com/xue-pai/FuxiCTR/tree/v1.2.2) for this experiment. See the model code: [DESTINE](https://github.com/xue-pai/FuxiCTR/blob/v1.2.2/fuxictr/pytorch/models/DESTINE.py).
+We use [FuxiCTR-v1.2.2](https://github.com/reczoo/FuxiCTR/tree/v1.2.2) for this experiment. See the model code: [DESTINE](https://github.com/reczoo/FuxiCTR/blob/v1.2.2/fuxictr/pytorch/models/DESTINE.py).
Running steps:
-1. Download [FuxiCTR-v1.2.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [fuxictr_version.py](./fuxictr_version.py#L3) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.2.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [fuxictr_version.py](./fuxictr_version.py#L3) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DIEN/DIEN_amazonelectronics_x1/README.md b/ranking/ctr/DIEN/DIEN_amazonelectronics_x1/README.md
index 0dc08dd5..6b7c52e0 100644
--- a/ranking/ctr/DIEN/DIEN_amazonelectronics_x1/README.md
+++ b/ranking/ctr/DIEN/DIEN_amazonelectronics_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DIEN model on the AmazonElectronics_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [AmazonElectronics_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/Amazon#AmazonElectronics_x1) to get data ready.
+Please refer to [AmazonElectronics_x1](https://github.com/reczoo/Datasets/tree/main/Amazon/AmazonElectronics_x1) to get the dataset details.
### Code
-We use the [DIEN](https://github.com/xue-pai/FuxiCTR/blob/v2.0.2/model_zoo/DIEN) model code from [FuxiCTR-v2.0.2](https://github.com/xue-pai/FuxiCTR/tree/v2.0.2) for this experiment.
+We use the [DIEN](https://github.com/reczoo/FuxiCTR/blob/v2.0.2/model_zoo/DIEN) model code from [FuxiCTR-v2.0.2](https://github.com/reczoo/FuxiCTR/tree/v2.0.2) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.2.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.2.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/DIEN/DIEN_kuaivideo_x1/README.md b/ranking/ctr/DIEN/DIEN_kuaivideo_x1/README.md
index d015118d..ca0383ec 100644
--- a/ranking/ctr/DIEN/DIEN_kuaivideo_x1/README.md
+++ b/ranking/ctr/DIEN/DIEN_kuaivideo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DIEN model on the KuaiVideo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [KuaiVideo_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/KuaiShou#KuaiVideo_x1) to get data ready.
+Please refer to [KuaiVideo_x1](https://github.com/reczoo/Datasets/tree/main/KuaiShou/KuaiVideo_x1) to get the dataset details.
### Code
-We use the [DIEN](https://github.com/xue-pai/FuxiCTR/blob/v2.0.2/model_zoo/DIEN) model code from [FuxiCTR-v2.0.2](https://github.com/xue-pai/FuxiCTR/tree/v2.0.2) for this experiment.
+We use the [DIEN](https://github.com/reczoo/FuxiCTR/blob/v2.0.2/model_zoo/DIEN) model code from [FuxiCTR-v2.0.2](https://github.com/reczoo/FuxiCTR/tree/v2.0.2) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.2.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.2.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/DIEN/DIEN_microvideo1.7m_x1/README.md b/ranking/ctr/DIEN/DIEN_microvideo1.7m_x1/README.md
index 9d9be59c..64a14319 100644
--- a/ranking/ctr/DIEN/DIEN_microvideo1.7m_x1/README.md
+++ b/ranking/ctr/DIEN/DIEN_microvideo1.7m_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DIEN model on the MicroVideo1.7M_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [MicroVideo1.7M_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/MicroVideo1.7M#MicroVideo17M_x1) to get data ready.
+Please refer to [MicroVideo1.7M_x1](https://github.com/reczoo/Datasets/tree/main/MicroVideo/MicroVideo1.7M_x1) to get the dataset details.
### Code
-We use the [DIEN](https://github.com/xue-pai/FuxiCTR/blob/v2.0.2/model_zoo/DIEN) model code from [FuxiCTR-v2.0.2](https://github.com/xue-pai/FuxiCTR/tree/v2.0.2) for this experiment.
+We use the [DIEN](https://github.com/reczoo/FuxiCTR/blob/v2.0.2/model_zoo/DIEN) model code from [FuxiCTR-v2.0.2](https://github.com/reczoo/FuxiCTR/tree/v2.0.2) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.2.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.2.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/DIEN/DIEN_taobaoad_x1/README.md b/ranking/ctr/DIEN/DIEN_taobaoad_x1/README.md
index c1584d3e..d329ea0a 100644
--- a/ranking/ctr/DIEN/DIEN_taobaoad_x1/README.md
+++ b/ranking/ctr/DIEN/DIEN_taobaoad_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DIEN model on the TaobaoAd_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [TaobaoAd_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/Taobao#TaobaoAd_x1) to get data ready.
+Please refer to [TaobaoAd_x1](https://github.com/reczoo/Datasets/tree/main/Taobao/TaobaoAd_x1) to get the dataset details.
### Code
-We use the [DIEN](https://github.com/xue-pai/FuxiCTR/blob/v2.0.2/model_zoo/DIEN) model code from [FuxiCTR-v2.0.2](https://github.com/xue-pai/FuxiCTR/tree/v2.0.2) for this experiment.
+We use the [DIEN](https://github.com/reczoo/FuxiCTR/blob/v2.0.2/model_zoo/DIEN) model code from [FuxiCTR-v2.0.2](https://github.com/reczoo/FuxiCTR/tree/v2.0.2) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.2.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.2.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/DIN/DIN_amazonelectronics_x1/README.md b/ranking/ctr/DIN/DIN_amazonelectronics_x1/README.md
index dce87c49..7c92d698 100644
--- a/ranking/ctr/DIN/DIN_amazonelectronics_x1/README.md
+++ b/ranking/ctr/DIN/DIN_amazonelectronics_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DIN model on the AmazonElectronics_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [AmazonElectronics_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/Amazon#AmazonElectronics_x1) to get data ready.
+Please refer to [AmazonElectronics_x1](https://github.com/reczoo/Datasets/tree/main/Amazon/AmazonElectronics_x1) to get the dataset details.
### Code
-We use the [DIN](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/DIN) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [DIN](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/DIN) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/DIN/DIN_kuaivideo_x1/README.md b/ranking/ctr/DIN/DIN_kuaivideo_x1/README.md
index 19a9c01c..fdd1a7ba 100644
--- a/ranking/ctr/DIN/DIN_kuaivideo_x1/README.md
+++ b/ranking/ctr/DIN/DIN_kuaivideo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DIN model on the KuaiVideo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [KuaiVideo_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/KuaiShou#KuaiVideo_x1) to get data ready.
+Please refer to [KuaiVideo_x1](https://github.com/reczoo/Datasets/tree/main/KuaiShou/KuaiVideo_x1) to get the dataset details.
### Code
-We use the [DIN](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/DIN) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [DIN](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/DIN) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/DIN/DIN_microvideo1.7m_x1/README.md b/ranking/ctr/DIN/DIN_microvideo1.7m_x1/README.md
index b4bb96a2..9e6e5135 100644
--- a/ranking/ctr/DIN/DIN_microvideo1.7m_x1/README.md
+++ b/ranking/ctr/DIN/DIN_microvideo1.7m_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DIN model on the MicroVideo1.7M_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [MicroVideo1.7M_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/MicroVideo1.7M#MicroVideo17M_x1) to get data ready.
+Please refer to [MicroVideo1.7M_x1](https://github.com/reczoo/Datasets/tree/main/MicroVideo/MicroVideo1.7M_x1) to get the dataset details.
### Code
-We use the [DIN](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/DIN) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [DIN](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/DIN) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/DIN/DIN_taobaoad_x1/README.md b/ranking/ctr/DIN/DIN_taobaoad_x1/README.md
index e087b09e..06c885bd 100644
--- a/ranking/ctr/DIN/DIN_taobaoad_x1/README.md
+++ b/ranking/ctr/DIN/DIN_taobaoad_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DIN model on the TaobaoAd_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [TaobaoAd_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/Taobao#TaobaoAd_x1) to get data ready.
+Please refer to [TaobaoAd_x1](https://github.com/reczoo/Datasets/tree/main/Taobao/TaobaoAd_x1) to get the dataset details.
### Code
-We use the [DIN](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/DIN) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [DIN](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/DIN) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/DLRM/DLRM_avazu_x1/README.md b/ranking/ctr/DLRM/DLRM_avazu_x1/README.md
index 5e9d8e22..e0af3b0a 100644
--- a/ranking/ctr/DLRM/DLRM_avazu_x1/README.md
+++ b/ranking/ctr/DLRM/DLRM_avazu_x1/README.md
@@ -1,184 +1,184 @@
-## DLRM_avazu_x1
-
-A hands-on guide to run the DLRM model on the Avazu_x1 dataset.
-
-Author: [XUEPAI](https://github.com/xue-pai)
-
-### Index
-[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
-
-### Environments
-+ Hardware
-
- ```python
- CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
- GPU: Tesla V100 32G
- RAM: 755G
-
- ```
-
-+ Software
-
- ```python
- CUDA: 10.2
- python: 3.6.4
- pytorch: 1.0.0
- pandas: 0.22.0
- numpy: 1.19.2
- scipy: 1.5.4
- sklearn: 0.22.1
- pyyaml: 5.4.1
- h5py: 2.8.0
- tqdm: 4.60.0
- fuxictr: 1.2.1
-
- ```
-
-### Dataset
-Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu#Avazu_x1). Please refer to the dataset details to get data ready.
-
-### Code
-
-We use [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [DLRM](https://github.com/xue-pai/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/DLRM.py).
-
-Running steps:
-
-1. Download [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
-
- ```python
- sys.path.append('YOUR_PATH_TO_FuxiCTR/')
- ```
-
-2. Create a data directory and put the downloaded csv files in `../data/Avazu/Avazu_x1`.
-
-3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [DLRM_avazu_x1_tuner_config_01](./DLRM_avazu_x1_tuner_config_01). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
-
-4. Run the following script to start.
-
- ```bash
- cd DLRM_avazu_x1
- nohup python run_expid.py --config ./DLRM_avazu_x1_tuner_config_01 --expid DLRM_avazu_x1_015_cf6fdabe --gpu 0 > run.log &
- tail -f run.log
- ```
-
-### Results
-
-| AUC | logloss |
-|:--------------------:|:--------------------:|
-| 0.763934 | 0.366904 |
-
-
-### Logs
-```python
-2022-05-27 19:47:35,352 P101383 INFO {
- "batch_norm": "True",
- "batch_size": "4096",
- "bottom_mlp_activations": "ReLU",
- "bottom_mlp_dropout": "0.3",
- "bottom_mlp_units": "[400, 400, 400]",
- "data_format": "csv",
- "data_root": "../data/Avazu/",
- "dataset_id": "avazu_x1_3fb65689",
- "debug": "False",
- "embedding_dim": "10",
- "embedding_regularizer": "0.01",
- "epochs": "100",
- "every_x_epochs": "1",
- "feature_cols": "[{'active': True, 'dtype': 'float', 'name': ['feat_1', 'feat_2', 'feat_3', 'feat_4', 'feat_5', 'feat_6', 'feat_7', 'feat_8', 'feat_9', 'feat_10', 'feat_11', 'feat_12', 'feat_13', 'feat_14', 'feat_15', 'feat_16', 'feat_17', 'feat_18', 'feat_19', 'feat_20', 'feat_21', 'feat_22'], 'type': 'categorical'}]",
- "gpu": "6",
- "interaction_op": "dot",
- "label_col": "{'dtype': 'float', 'name': 'label'}",
- "learning_rate": "0.001",
- "loss": "binary_crossentropy",
- "metrics": "['AUC', 'logloss']",
- "min_categr_count": "1",
- "model": "DLRM",
- "model_id": "DLRM_avazu_x1_015_cf6fdabe",
- "model_root": "./Avazu/DLRM_avazu_x1/",
- "monitor": "AUC",
- "monitor_mode": "max",
- "net_regularizer": "0",
- "num_workers": "3",
- "optimizer": "adam",
- "patience": "2",
- "pickle_feature_encoder": "True",
- "save_best_only": "True",
- "seed": "2021",
- "shuffle": "True",
- "task": "binary_classification",
- "test_data": "../data/Avazu/Avazu_x1/test.csv",
- "top_mlp_activations": "ReLU",
- "top_mlp_dropout": "0.3",
- "top_mlp_units": "[400, 400, 400]",
- "train_data": "../data/Avazu/Avazu_x1/train.csv",
- "use_hdf5": "True",
- "valid_data": "../data/Avazu/Avazu_x1/valid.csv",
- "verbose": "0",
- "version": "pytorch"
-}
-2022-05-27 19:47:35,353 P101383 INFO Set up feature encoder...
-2022-05-27 19:47:35,353 P101383 INFO Load feature_map from json: ../data/Avazu/avazu_x1_3fb65689/feature_map.json
-2022-05-27 19:47:35,353 P101383 INFO Loading data...
-2022-05-27 19:47:35,355 P101383 INFO Loading data from h5: ../data/Avazu/avazu_x1_3fb65689/train.h5
-2022-05-27 19:47:37,977 P101383 INFO Loading data from h5: ../data/Avazu/avazu_x1_3fb65689/valid.h5
-2022-05-27 19:47:38,317 P101383 INFO Train samples: total/28300276, pos/4953382, neg/23346894, ratio/17.50%, blocks/1
-2022-05-27 19:47:38,317 P101383 INFO Validation samples: total/4042897, pos/678699, neg/3364198, ratio/16.79%, blocks/1
-2022-05-27 19:47:38,317 P101383 INFO Loading train data done.
-2022-05-27 19:47:44,625 P101383 INFO Total number of parameters: 13402391.
-2022-05-27 19:47:44,625 P101383 INFO Start training: 6910 batches/epoch
-2022-05-27 19:47:44,625 P101383 INFO ************ Epoch=1 start ************
-2022-05-27 19:58:31,628 P101383 INFO [Metrics] AUC: 0.731686 - logloss: 0.409691
-2022-05-27 19:58:31,631 P101383 INFO Save best model: monitor(max): 0.731686
-2022-05-27 19:58:31,853 P101383 INFO --- 6910/6910 batches finished ---
-2022-05-27 19:58:31,894 P101383 INFO Train loss: 0.432078
-2022-05-27 19:58:31,894 P101383 INFO ************ Epoch=1 end ************
-2022-05-27 20:09:15,651 P101383 INFO [Metrics] AUC: 0.732220 - logloss: 0.409117
-2022-05-27 20:09:15,653 P101383 INFO Save best model: monitor(max): 0.732220
-2022-05-27 20:09:15,726 P101383 INFO --- 6910/6910 batches finished ---
-2022-05-27 20:09:15,770 P101383 INFO Train loss: 0.429395
-2022-05-27 20:09:15,770 P101383 INFO ************ Epoch=2 end ************
-2022-05-27 20:20:01,152 P101383 INFO [Metrics] AUC: 0.733731 - logloss: 0.404299
-2022-05-27 20:20:01,154 P101383 INFO Save best model: monitor(max): 0.733731
-2022-05-27 20:20:01,233 P101383 INFO --- 6910/6910 batches finished ---
-2022-05-27 20:20:01,280 P101383 INFO Train loss: 0.429940
-2022-05-27 20:20:01,280 P101383 INFO ************ Epoch=3 end ************
-2022-05-27 20:30:46,927 P101383 INFO [Metrics] AUC: 0.735620 - logloss: 0.406103
-2022-05-27 20:30:46,930 P101383 INFO Save best model: monitor(max): 0.735620
-2022-05-27 20:30:47,008 P101383 INFO --- 6910/6910 batches finished ---
-2022-05-27 20:30:47,057 P101383 INFO Train loss: 0.430528
-2022-05-27 20:30:47,057 P101383 INFO ************ Epoch=4 end ************
-2022-05-27 20:41:32,465 P101383 INFO [Metrics] AUC: 0.733933 - logloss: 0.403524
-2022-05-27 20:41:32,468 P101383 INFO Monitor(max) STOP: 0.733933 !
-2022-05-27 20:41:32,468 P101383 INFO Reduce learning rate on plateau: 0.000100
-2022-05-27 20:41:32,468 P101383 INFO --- 6910/6910 batches finished ---
-2022-05-27 20:41:32,514 P101383 INFO Train loss: 0.430711
-2022-05-27 20:41:32,515 P101383 INFO ************ Epoch=5 end ************
-2022-05-27 20:52:16,107 P101383 INFO [Metrics] AUC: 0.745650 - logloss: 0.396405
-2022-05-27 20:52:16,109 P101383 INFO Save best model: monitor(max): 0.745650
-2022-05-27 20:52:16,176 P101383 INFO --- 6910/6910 batches finished ---
-2022-05-27 20:52:16,216 P101383 INFO Train loss: 0.403793
-2022-05-27 20:52:16,216 P101383 INFO ************ Epoch=6 end ************
-2022-05-27 21:03:00,204 P101383 INFO [Metrics] AUC: 0.744514 - logloss: 0.396459
-2022-05-27 21:03:00,206 P101383 INFO Monitor(max) STOP: 0.744514 !
-2022-05-27 21:03:00,206 P101383 INFO Reduce learning rate on plateau: 0.000010
-2022-05-27 21:03:00,206 P101383 INFO --- 6910/6910 batches finished ---
-2022-05-27 21:03:00,246 P101383 INFO Train loss: 0.403799
-2022-05-27 21:03:00,246 P101383 INFO ************ Epoch=7 end ************
-2022-05-27 21:08:12,038 P101383 INFO [Metrics] AUC: 0.741671 - logloss: 0.398020
-2022-05-27 21:08:12,040 P101383 INFO Monitor(max) STOP: 0.741671 !
-2022-05-27 21:08:12,040 P101383 INFO Reduce learning rate on plateau: 0.000001
-2022-05-27 21:08:12,041 P101383 INFO Early stopping at epoch=8
-2022-05-27 21:08:12,041 P101383 INFO --- 6910/6910 batches finished ---
-2022-05-27 21:08:12,077 P101383 INFO Train loss: 0.395465
-2022-05-27 21:08:12,077 P101383 INFO Training finished.
-2022-05-27 21:08:12,077 P101383 INFO Load best model: /cache/FuxiCTR/benchmarks/Avazu/DLRM_avazu_x1/avazu_x1_3fb65689/DLRM_avazu_x1_015_cf6fdabe.model
-2022-05-27 21:08:15,153 P101383 INFO ****** Validation evaluation ******
-2022-05-27 21:08:27,304 P101383 INFO [Metrics] AUC: 0.745650 - logloss: 0.396405
-2022-05-27 21:08:27,397 P101383 INFO ******** Test evaluation ********
-2022-05-27 21:08:27,397 P101383 INFO Loading data...
-2022-05-27 21:08:27,398 P101383 INFO Loading data from h5: ../data/Avazu/avazu_x1_3fb65689/test.h5
-2022-05-27 21:08:28,020 P101383 INFO Test samples: total/8085794, pos/1232985, neg/6852809, ratio/15.25%, blocks/1
-2022-05-27 21:08:28,020 P101383 INFO Loading test data done.
-2022-05-27 21:08:54,796 P101383 INFO [Metrics] AUC: 0.763934 - logloss: 0.366904
-
-```
+## DLRM_avazu_x1
+
+A hands-on guide to run the DLRM model on the Avazu_x1 dataset.
+
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
+
+### Index
+[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
+
+### Environments
++ Hardware
+
+ ```python
+ CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
+ GPU: Tesla V100 32G
+ RAM: 755G
+
+ ```
+
++ Software
+
+ ```python
+ CUDA: 10.2
+ python: 3.6.4
+ pytorch: 1.0.0
+ pandas: 0.22.0
+ numpy: 1.19.2
+ scipy: 1.5.4
+ sklearn: 0.22.1
+ pyyaml: 5.4.1
+ h5py: 2.8.0
+ tqdm: 4.60.0
+ fuxictr: 1.2.1
+
+ ```
+
+### Dataset
+Dataset ID: [Avazu_x1](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x1). Please refer to the dataset details to get data ready.
+
+### Code
+
+We use [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [DLRM](https://github.com/reczoo/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/DLRM.py).
+
+Running steps:
+
+1. Download [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+
+ ```python
+ sys.path.append('YOUR_PATH_TO_FuxiCTR/')
+ ```
+
+2. Create a data directory and put the downloaded csv files in `../data/Avazu/Avazu_x1`.
+
+3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [DLRM_avazu_x1_tuner_config_01](./DLRM_avazu_x1_tuner_config_01). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
+
+4. Run the following script to start.
+
+ ```bash
+ cd DLRM_avazu_x1
+ nohup python run_expid.py --config ./DLRM_avazu_x1_tuner_config_01 --expid DLRM_avazu_x1_015_cf6fdabe --gpu 0 > run.log &
+ tail -f run.log
+ ```
+
+### Results
+
+| AUC | logloss |
+|:--------------------:|:--------------------:|
+| 0.763934 | 0.366904 |
+
+
+### Logs
+```python
+2022-05-27 19:47:35,352 P101383 INFO {
+ "batch_norm": "True",
+ "batch_size": "4096",
+ "bottom_mlp_activations": "ReLU",
+ "bottom_mlp_dropout": "0.3",
+ "bottom_mlp_units": "[400, 400, 400]",
+ "data_format": "csv",
+ "data_root": "../data/Avazu/",
+ "dataset_id": "avazu_x1_3fb65689",
+ "debug": "False",
+ "embedding_dim": "10",
+ "embedding_regularizer": "0.01",
+ "epochs": "100",
+ "every_x_epochs": "1",
+ "feature_cols": "[{'active': True, 'dtype': 'float', 'name': ['feat_1', 'feat_2', 'feat_3', 'feat_4', 'feat_5', 'feat_6', 'feat_7', 'feat_8', 'feat_9', 'feat_10', 'feat_11', 'feat_12', 'feat_13', 'feat_14', 'feat_15', 'feat_16', 'feat_17', 'feat_18', 'feat_19', 'feat_20', 'feat_21', 'feat_22'], 'type': 'categorical'}]",
+ "gpu": "6",
+ "interaction_op": "dot",
+ "label_col": "{'dtype': 'float', 'name': 'label'}",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "metrics": "['AUC', 'logloss']",
+ "min_categr_count": "1",
+ "model": "DLRM",
+ "model_id": "DLRM_avazu_x1_015_cf6fdabe",
+ "model_root": "./Avazu/DLRM_avazu_x1/",
+ "monitor": "AUC",
+ "monitor_mode": "max",
+ "net_regularizer": "0",
+ "num_workers": "3",
+ "optimizer": "adam",
+ "patience": "2",
+ "pickle_feature_encoder": "True",
+ "save_best_only": "True",
+ "seed": "2021",
+ "shuffle": "True",
+ "task": "binary_classification",
+ "test_data": "../data/Avazu/Avazu_x1/test.csv",
+ "top_mlp_activations": "ReLU",
+ "top_mlp_dropout": "0.3",
+ "top_mlp_units": "[400, 400, 400]",
+ "train_data": "../data/Avazu/Avazu_x1/train.csv",
+ "use_hdf5": "True",
+ "valid_data": "../data/Avazu/Avazu_x1/valid.csv",
+ "verbose": "0",
+ "version": "pytorch"
+}
+2022-05-27 19:47:35,353 P101383 INFO Set up feature encoder...
+2022-05-27 19:47:35,353 P101383 INFO Load feature_map from json: ../data/Avazu/avazu_x1_3fb65689/feature_map.json
+2022-05-27 19:47:35,353 P101383 INFO Loading data...
+2022-05-27 19:47:35,355 P101383 INFO Loading data from h5: ../data/Avazu/avazu_x1_3fb65689/train.h5
+2022-05-27 19:47:37,977 P101383 INFO Loading data from h5: ../data/Avazu/avazu_x1_3fb65689/valid.h5
+2022-05-27 19:47:38,317 P101383 INFO Train samples: total/28300276, pos/4953382, neg/23346894, ratio/17.50%, blocks/1
+2022-05-27 19:47:38,317 P101383 INFO Validation samples: total/4042897, pos/678699, neg/3364198, ratio/16.79%, blocks/1
+2022-05-27 19:47:38,317 P101383 INFO Loading train data done.
+2022-05-27 19:47:44,625 P101383 INFO Total number of parameters: 13402391.
+2022-05-27 19:47:44,625 P101383 INFO Start training: 6910 batches/epoch
+2022-05-27 19:47:44,625 P101383 INFO ************ Epoch=1 start ************
+2022-05-27 19:58:31,628 P101383 INFO [Metrics] AUC: 0.731686 - logloss: 0.409691
+2022-05-27 19:58:31,631 P101383 INFO Save best model: monitor(max): 0.731686
+2022-05-27 19:58:31,853 P101383 INFO --- 6910/6910 batches finished ---
+2022-05-27 19:58:31,894 P101383 INFO Train loss: 0.432078
+2022-05-27 19:58:31,894 P101383 INFO ************ Epoch=1 end ************
+2022-05-27 20:09:15,651 P101383 INFO [Metrics] AUC: 0.732220 - logloss: 0.409117
+2022-05-27 20:09:15,653 P101383 INFO Save best model: monitor(max): 0.732220
+2022-05-27 20:09:15,726 P101383 INFO --- 6910/6910 batches finished ---
+2022-05-27 20:09:15,770 P101383 INFO Train loss: 0.429395
+2022-05-27 20:09:15,770 P101383 INFO ************ Epoch=2 end ************
+2022-05-27 20:20:01,152 P101383 INFO [Metrics] AUC: 0.733731 - logloss: 0.404299
+2022-05-27 20:20:01,154 P101383 INFO Save best model: monitor(max): 0.733731
+2022-05-27 20:20:01,233 P101383 INFO --- 6910/6910 batches finished ---
+2022-05-27 20:20:01,280 P101383 INFO Train loss: 0.429940
+2022-05-27 20:20:01,280 P101383 INFO ************ Epoch=3 end ************
+2022-05-27 20:30:46,927 P101383 INFO [Metrics] AUC: 0.735620 - logloss: 0.406103
+2022-05-27 20:30:46,930 P101383 INFO Save best model: monitor(max): 0.735620
+2022-05-27 20:30:47,008 P101383 INFO --- 6910/6910 batches finished ---
+2022-05-27 20:30:47,057 P101383 INFO Train loss: 0.430528
+2022-05-27 20:30:47,057 P101383 INFO ************ Epoch=4 end ************
+2022-05-27 20:41:32,465 P101383 INFO [Metrics] AUC: 0.733933 - logloss: 0.403524
+2022-05-27 20:41:32,468 P101383 INFO Monitor(max) STOP: 0.733933 !
+2022-05-27 20:41:32,468 P101383 INFO Reduce learning rate on plateau: 0.000100
+2022-05-27 20:41:32,468 P101383 INFO --- 6910/6910 batches finished ---
+2022-05-27 20:41:32,514 P101383 INFO Train loss: 0.430711
+2022-05-27 20:41:32,515 P101383 INFO ************ Epoch=5 end ************
+2022-05-27 20:52:16,107 P101383 INFO [Metrics] AUC: 0.745650 - logloss: 0.396405
+2022-05-27 20:52:16,109 P101383 INFO Save best model: monitor(max): 0.745650
+2022-05-27 20:52:16,176 P101383 INFO --- 6910/6910 batches finished ---
+2022-05-27 20:52:16,216 P101383 INFO Train loss: 0.403793
+2022-05-27 20:52:16,216 P101383 INFO ************ Epoch=6 end ************
+2022-05-27 21:03:00,204 P101383 INFO [Metrics] AUC: 0.744514 - logloss: 0.396459
+2022-05-27 21:03:00,206 P101383 INFO Monitor(max) STOP: 0.744514 !
+2022-05-27 21:03:00,206 P101383 INFO Reduce learning rate on plateau: 0.000010
+2022-05-27 21:03:00,206 P101383 INFO --- 6910/6910 batches finished ---
+2022-05-27 21:03:00,246 P101383 INFO Train loss: 0.403799
+2022-05-27 21:03:00,246 P101383 INFO ************ Epoch=7 end ************
+2022-05-27 21:08:12,038 P101383 INFO [Metrics] AUC: 0.741671 - logloss: 0.398020
+2022-05-27 21:08:12,040 P101383 INFO Monitor(max) STOP: 0.741671 !
+2022-05-27 21:08:12,040 P101383 INFO Reduce learning rate on plateau: 0.000001
+2022-05-27 21:08:12,041 P101383 INFO Early stopping at epoch=8
+2022-05-27 21:08:12,041 P101383 INFO --- 6910/6910 batches finished ---
+2022-05-27 21:08:12,077 P101383 INFO Train loss: 0.395465
+2022-05-27 21:08:12,077 P101383 INFO Training finished.
+2022-05-27 21:08:12,077 P101383 INFO Load best model: /cache/FuxiCTR/benchmarks/Avazu/DLRM_avazu_x1/avazu_x1_3fb65689/DLRM_avazu_x1_015_cf6fdabe.model
+2022-05-27 21:08:15,153 P101383 INFO ****** Validation evaluation ******
+2022-05-27 21:08:27,304 P101383 INFO [Metrics] AUC: 0.745650 - logloss: 0.396405
+2022-05-27 21:08:27,397 P101383 INFO ******** Test evaluation ********
+2022-05-27 21:08:27,397 P101383 INFO Loading data...
+2022-05-27 21:08:27,398 P101383 INFO Loading data from h5: ../data/Avazu/avazu_x1_3fb65689/test.h5
+2022-05-27 21:08:28,020 P101383 INFO Test samples: total/8085794, pos/1232985, neg/6852809, ratio/15.25%, blocks/1
+2022-05-27 21:08:28,020 P101383 INFO Loading test data done.
+2022-05-27 21:08:54,796 P101383 INFO [Metrics] AUC: 0.763934 - logloss: 0.366904
+
+```
diff --git a/ranking/ctr/DLRM/DLRM_criteo_x1/README.md b/ranking/ctr/DLRM/DLRM_criteo_x1/README.md
index 3b4de1fb..b708c708 100644
--- a/ranking/ctr/DLRM/DLRM_criteo_x1/README.md
+++ b/ranking/ctr/DLRM/DLRM_criteo_x1/README.md
@@ -1,229 +1,229 @@
-## DLRM_criteo_x1
-
-A hands-on guide to run the DLRM model on the Criteo_x1 dataset.
-
-Author: [XUEPAI](https://github.com/xue-pai)
-
-### Index
-[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
-
-### Environments
-+ Hardware
-
- ```python
- CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
- GPU: Tesla V100 32G
- RAM: 755G
-
- ```
-
-+ Software
-
- ```python
- CUDA: 10.2
- python: 3.6.4
- pytorch: 1.0.0
- pandas: 0.22.0
- numpy: 1.19.2
- scipy: 1.5.4
- sklearn: 0.22.1
- pyyaml: 5.4.1
- h5py: 2.8.0
- tqdm: 4.60.0
- fuxictr: 1.2.1
-
- ```
-
-### Dataset
-Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo#Criteo_x1). Please refer to the dataset details to get data ready.
-
-### Code
-
-We use [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [DLRM](https://github.com/xue-pai/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/DLRM.py).
-
-Running steps:
-
-1. Download [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
-
- ```python
- sys.path.append('YOUR_PATH_TO_FuxiCTR/')
- ```
-
-2. Create a data directory and put the downloaded csv files in `../data/Criteo/Criteo_x1`.
-
-3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [DLRM_criteo_x1_tuner_config_02](./DLRM_criteo_x1_tuner_config_02). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
-
-4. Run the following script to start.
-
- ```bash
- cd DLRM_criteo_x1
- nohup python run_expid.py --config ./DLRM_criteo_x1_tuner_config_02 --expid DLRM_criteo_x1_001_4d897285 --gpu 0 > run.log &
- tail -f run.log
- ```
-
-### Results
-
-| AUC | logloss |
-|:--------------------:|:--------------------:|
-| 0.813804 | 0.438155 |
-
-
-### Logs
-```python
-2022-05-29 15:05:50,295 P7851 INFO {
- "batch_norm": "True",
- "batch_size": "4096",
- "bottom_mlp_activations": "ReLU",
- "bottom_mlp_dropout": "0",
- "bottom_mlp_units": "[400, 400, 400]",
- "data_format": "csv",
- "data_root": "../data/Criteo/",
- "dataset_id": "criteo_x1_7b681156",
- "debug": "False",
- "embedding_dim": "10",
- "embedding_regularizer": "1e-05",
- "epochs": "100",
- "every_x_epochs": "1",
- "feature_cols": "[{'active': True, 'dtype': 'float', 'name': ['I1', 'I2', 'I3', 'I4', 'I5', 'I6', 'I7', 'I8', 'I9', 'I10', 'I11', 'I12', 'I13'], 'type': 'numeric'}, {'active': True, 'dtype': 'float', 'name': ['C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10', 'C11', 'C12', 'C13', 'C14', 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21', 'C22', 'C23', 'C24', 'C25', 'C26'], 'type': 'categorical'}]",
- "gpu": "0",
- "interaction_op": "dot",
- "label_col": "{'dtype': 'float', 'name': 'label'}",
- "learning_rate": "0.001",
- "loss": "binary_crossentropy",
- "metrics": "['AUC', 'logloss']",
- "min_categr_count": "1",
- "model": "DLRM",
- "model_id": "DLRM_criteo_x1_001_4d897285",
- "model_root": "./Criteo/DLRM_criteo_x1/",
- "monitor": "AUC",
- "monitor_mode": "max",
- "net_regularizer": "0",
- "num_workers": "3",
- "optimizer": "adam",
- "patience": "2",
- "pickle_feature_encoder": "True",
- "save_best_only": "True",
- "seed": "2021",
- "shuffle": "True",
- "task": "binary_classification",
- "test_data": "../data/Criteo/Criteo_x1/test.csv",
- "top_mlp_activations": "ReLU",
- "top_mlp_dropout": "0.2",
- "top_mlp_units": "[400, 400, 400]",
- "train_data": "../data/Criteo/Criteo_x1/train.csv",
- "use_hdf5": "True",
- "valid_data": "../data/Criteo/Criteo_x1/valid.csv",
- "verbose": "0",
- "version": "pytorch"
-}
-2022-05-29 15:05:50,295 P7851 INFO Set up feature encoder...
-2022-05-29 15:05:50,295 P7851 INFO Load feature_map from json: ../data/Criteo/criteo_x1_7b681156/feature_map.json
-2022-05-29 15:05:50,296 P7851 INFO Loading data...
-2022-05-29 15:05:50,297 P7851 INFO Loading data from h5: ../data/Criteo/criteo_x1_7b681156/train.h5
-2022-05-29 15:05:55,450 P7851 INFO Loading data from h5: ../data/Criteo/criteo_x1_7b681156/valid.h5
-2022-05-29 15:05:56,805 P7851 INFO Train samples: total/33003326, pos/8456369, neg/24546957, ratio/25.62%, blocks/1
-2022-05-29 15:05:56,805 P7851 INFO Validation samples: total/8250124, pos/2114300, neg/6135824, ratio/25.63%, blocks/1
-2022-05-29 15:05:56,805 P7851 INFO Loading train data done.
-2022-05-29 15:06:02,843 P7851 INFO Total number of parameters: 21664241.
-2022-05-29 15:06:02,843 P7851 INFO Start training: 8058 batches/epoch
-2022-05-29 15:06:02,844 P7851 INFO ************ Epoch=1 start ************
-2022-05-29 15:21:26,950 P7851 INFO [Metrics] AUC: 0.804187 - logloss: 0.447040
-2022-05-29 15:21:26,952 P7851 INFO Save best model: monitor(max): 0.804187
-2022-05-29 15:21:27,059 P7851 INFO --- 8058/8058 batches finished ---
-2022-05-29 15:21:27,104 P7851 INFO Train loss: 0.461660
-2022-05-29 15:21:27,104 P7851 INFO ************ Epoch=1 end ************
-2022-05-29 15:36:49,337 P7851 INFO [Metrics] AUC: 0.806524 - logloss: 0.445125
-2022-05-29 15:36:49,339 P7851 INFO Save best model: monitor(max): 0.806524
-2022-05-29 15:36:49,463 P7851 INFO --- 8058/8058 batches finished ---
-2022-05-29 15:36:49,502 P7851 INFO Train loss: 0.456277
-2022-05-29 15:36:49,503 P7851 INFO ************ Epoch=2 end ************
-2022-05-29 15:52:12,386 P7851 INFO [Metrics] AUC: 0.807615 - logloss: 0.443908
-2022-05-29 15:52:12,387 P7851 INFO Save best model: monitor(max): 0.807615
-2022-05-29 15:52:12,513 P7851 INFO --- 8058/8058 batches finished ---
-2022-05-29 15:52:12,573 P7851 INFO Train loss: 0.454813
-2022-05-29 15:52:12,573 P7851 INFO ************ Epoch=3 end ************
-2022-05-29 16:07:31,604 P7851 INFO [Metrics] AUC: 0.807956 - logloss: 0.443703
-2022-05-29 16:07:31,605 P7851 INFO Save best model: monitor(max): 0.807956
-2022-05-29 16:07:31,731 P7851 INFO --- 8058/8058 batches finished ---
-2022-05-29 16:07:31,779 P7851 INFO Train loss: 0.454118
-2022-05-29 16:07:31,779 P7851 INFO ************ Epoch=4 end ************
-2022-05-29 16:22:48,793 P7851 INFO [Metrics] AUC: 0.808651 - logloss: 0.442900
-2022-05-29 16:22:48,794 P7851 INFO Save best model: monitor(max): 0.808651
-2022-05-29 16:22:48,932 P7851 INFO --- 8058/8058 batches finished ---
-2022-05-29 16:22:48,978 P7851 INFO Train loss: 0.453736
-2022-05-29 16:22:48,978 P7851 INFO ************ Epoch=5 end ************
-2022-05-29 16:38:05,989 P7851 INFO [Metrics] AUC: 0.808663 - logloss: 0.442960
-2022-05-29 16:38:05,990 P7851 INFO Save best model: monitor(max): 0.808663
-2022-05-29 16:38:06,115 P7851 INFO --- 8058/8058 batches finished ---
-2022-05-29 16:38:06,159 P7851 INFO Train loss: 0.453420
-2022-05-29 16:38:06,160 P7851 INFO ************ Epoch=6 end ************
-2022-05-29 16:53:22,282 P7851 INFO [Metrics] AUC: 0.809122 - logloss: 0.442572
-2022-05-29 16:53:22,284 P7851 INFO Save best model: monitor(max): 0.809122
-2022-05-29 16:53:22,386 P7851 INFO --- 8058/8058 batches finished ---
-2022-05-29 16:53:22,425 P7851 INFO Train loss: 0.453186
-2022-05-29 16:53:22,425 P7851 INFO ************ Epoch=7 end ************
-2022-05-29 17:08:38,105 P7851 INFO [Metrics] AUC: 0.809201 - logloss: 0.442405
-2022-05-29 17:08:38,106 P7851 INFO Save best model: monitor(max): 0.809201
-2022-05-29 17:08:38,207 P7851 INFO --- 8058/8058 batches finished ---
-2022-05-29 17:08:38,245 P7851 INFO Train loss: 0.453003
-2022-05-29 17:08:38,246 P7851 INFO ************ Epoch=8 end ************
-2022-05-29 17:23:54,309 P7851 INFO [Metrics] AUC: 0.809330 - logloss: 0.442346
-2022-05-29 17:23:54,310 P7851 INFO Save best model: monitor(max): 0.809330
-2022-05-29 17:23:54,410 P7851 INFO --- 8058/8058 batches finished ---
-2022-05-29 17:23:54,448 P7851 INFO Train loss: 0.452831
-2022-05-29 17:23:54,448 P7851 INFO ************ Epoch=9 end ************
-2022-05-29 17:39:10,920 P7851 INFO [Metrics] AUC: 0.809664 - logloss: 0.441991
-2022-05-29 17:39:10,922 P7851 INFO Save best model: monitor(max): 0.809664
-2022-05-29 17:39:11,024 P7851 INFO --- 8058/8058 batches finished ---
-2022-05-29 17:39:11,062 P7851 INFO Train loss: 0.452682
-2022-05-29 17:39:11,062 P7851 INFO ************ Epoch=10 end ************
-2022-05-29 17:54:24,245 P7851 INFO [Metrics] AUC: 0.809675 - logloss: 0.441967
-2022-05-29 17:54:24,246 P7851 INFO Save best model: monitor(max): 0.809675
-2022-05-29 17:54:24,355 P7851 INFO --- 8058/8058 batches finished ---
-2022-05-29 17:54:24,394 P7851 INFO Train loss: 0.452588
-2022-05-29 17:54:24,394 P7851 INFO ************ Epoch=11 end ************
-2022-05-29 18:09:40,265 P7851 INFO [Metrics] AUC: 0.809607 - logloss: 0.442167
-2022-05-29 18:09:40,266 P7851 INFO Monitor(max) STOP: 0.809607 !
-2022-05-29 18:09:40,267 P7851 INFO Reduce learning rate on plateau: 0.000100
-2022-05-29 18:09:40,267 P7851 INFO --- 8058/8058 batches finished ---
-2022-05-29 18:09:40,307 P7851 INFO Train loss: 0.452451
-2022-05-29 18:09:40,307 P7851 INFO ************ Epoch=12 end ************
-2022-05-29 18:17:07,207 P7851 INFO [Metrics] AUC: 0.812970 - logloss: 0.438972
-2022-05-29 18:17:07,208 P7851 INFO Save best model: monitor(max): 0.812970
-2022-05-29 18:17:07,302 P7851 INFO --- 8058/8058 batches finished ---
-2022-05-29 18:17:07,340 P7851 INFO Train loss: 0.441606
-2022-05-29 18:17:07,341 P7851 INFO ************ Epoch=13 end ************
-2022-05-29 18:24:30,663 P7851 INFO [Metrics] AUC: 0.813400 - logloss: 0.438582
-2022-05-29 18:24:30,665 P7851 INFO Save best model: monitor(max): 0.813400
-2022-05-29 18:24:30,772 P7851 INFO --- 8058/8058 batches finished ---
-2022-05-29 18:24:30,823 P7851 INFO Train loss: 0.437457
-2022-05-29 18:24:30,823 P7851 INFO ************ Epoch=14 end ************
-2022-05-29 18:31:50,965 P7851 INFO [Metrics] AUC: 0.813497 - logloss: 0.438596
-2022-05-29 18:31:50,966 P7851 INFO Save best model: monitor(max): 0.813497
-2022-05-29 18:31:51,061 P7851 INFO --- 8058/8058 batches finished ---
-2022-05-29 18:31:51,101 P7851 INFO Train loss: 0.435468
-2022-05-29 18:31:51,101 P7851 INFO ************ Epoch=15 end ************
-2022-05-29 18:39:13,073 P7851 INFO [Metrics] AUC: 0.813298 - logloss: 0.438926
-2022-05-29 18:39:13,075 P7851 INFO Monitor(max) STOP: 0.813298 !
-2022-05-29 18:39:13,075 P7851 INFO Reduce learning rate on plateau: 0.000010
-2022-05-29 18:39:13,075 P7851 INFO --- 8058/8058 batches finished ---
-2022-05-29 18:39:13,115 P7851 INFO Train loss: 0.433854
-2022-05-29 18:39:13,115 P7851 INFO ************ Epoch=16 end ************
-2022-05-29 18:46:31,049 P7851 INFO [Metrics] AUC: 0.812458 - logloss: 0.440054
-2022-05-29 18:46:31,051 P7851 INFO Monitor(max) STOP: 0.812458 !
-2022-05-29 18:46:31,051 P7851 INFO Reduce learning rate on plateau: 0.000001
-2022-05-29 18:46:31,051 P7851 INFO Early stopping at epoch=17
-2022-05-29 18:46:31,051 P7851 INFO --- 8058/8058 batches finished ---
-2022-05-29 18:46:31,090 P7851 INFO Train loss: 0.428564
-2022-05-29 18:46:31,090 P7851 INFO Training finished.
-2022-05-29 18:46:31,090 P7851 INFO Load best model: /cache/FuxiCTR/benchmarks/Criteo/DLRM_criteo_x1/criteo_x1_7b681156/DLRM_criteo_x1_001_4d897285.model
-2022-05-29 18:46:31,168 P7851 INFO ****** Validation evaluation ******
-2022-05-29 18:46:55,691 P7851 INFO [Metrics] AUC: 0.813497 - logloss: 0.438596
-2022-05-29 18:46:55,768 P7851 INFO ******** Test evaluation ********
-2022-05-29 18:46:55,768 P7851 INFO Loading data...
-2022-05-29 18:46:55,768 P7851 INFO Loading data from h5: ../data/Criteo/criteo_x1_7b681156/test.h5
-2022-05-29 18:46:56,559 P7851 INFO Test samples: total/4587167, pos/1174769, neg/3412398, ratio/25.61%, blocks/1
-2022-05-29 18:46:56,559 P7851 INFO Loading test data done.
-2022-05-29 18:47:11,210 P7851 INFO [Metrics] AUC: 0.813804 - logloss: 0.438155
-
-```
+## DLRM_criteo_x1
+
+A hands-on guide to run the DLRM model on the Criteo_x1 dataset.
+
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
+
+### Index
+[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
+
+### Environments
++ Hardware
+
+ ```python
+ CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
+ GPU: Tesla V100 32G
+ RAM: 755G
+
+ ```
+
++ Software
+
+ ```python
+ CUDA: 10.2
+ python: 3.6.4
+ pytorch: 1.0.0
+ pandas: 0.22.0
+ numpy: 1.19.2
+ scipy: 1.5.4
+ sklearn: 0.22.1
+ pyyaml: 5.4.1
+ h5py: 2.8.0
+ tqdm: 4.60.0
+ fuxictr: 1.2.1
+
+ ```
+
+### Dataset
+Dataset ID: [Criteo_x1](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x1). Please refer to the dataset details to get data ready.
+
+### Code
+
+We use [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [DLRM](https://github.com/reczoo/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/DLRM.py).
+
+Running steps:
+
+1. Download [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+
+ ```python
+ sys.path.append('YOUR_PATH_TO_FuxiCTR/')
+ ```
+
+2. Create a data directory and put the downloaded csv files in `../data/Criteo/Criteo_x1`.
+
+3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [DLRM_criteo_x1_tuner_config_02](./DLRM_criteo_x1_tuner_config_02). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
+
+4. Run the following script to start.
+
+ ```bash
+ cd DLRM_criteo_x1
+ nohup python run_expid.py --config ./DLRM_criteo_x1_tuner_config_02 --expid DLRM_criteo_x1_001_4d897285 --gpu 0 > run.log &
+ tail -f run.log
+ ```
+
+### Results
+
+| AUC | logloss |
+|:--------------------:|:--------------------:|
+| 0.813804 | 0.438155 |
+
+
+### Logs
+```python
+2022-05-29 15:05:50,295 P7851 INFO {
+ "batch_norm": "True",
+ "batch_size": "4096",
+ "bottom_mlp_activations": "ReLU",
+ "bottom_mlp_dropout": "0",
+ "bottom_mlp_units": "[400, 400, 400]",
+ "data_format": "csv",
+ "data_root": "../data/Criteo/",
+ "dataset_id": "criteo_x1_7b681156",
+ "debug": "False",
+ "embedding_dim": "10",
+ "embedding_regularizer": "1e-05",
+ "epochs": "100",
+ "every_x_epochs": "1",
+ "feature_cols": "[{'active': True, 'dtype': 'float', 'name': ['I1', 'I2', 'I3', 'I4', 'I5', 'I6', 'I7', 'I8', 'I9', 'I10', 'I11', 'I12', 'I13'], 'type': 'numeric'}, {'active': True, 'dtype': 'float', 'name': ['C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10', 'C11', 'C12', 'C13', 'C14', 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21', 'C22', 'C23', 'C24', 'C25', 'C26'], 'type': 'categorical'}]",
+ "gpu": "0",
+ "interaction_op": "dot",
+ "label_col": "{'dtype': 'float', 'name': 'label'}",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "metrics": "['AUC', 'logloss']",
+ "min_categr_count": "1",
+ "model": "DLRM",
+ "model_id": "DLRM_criteo_x1_001_4d897285",
+ "model_root": "./Criteo/DLRM_criteo_x1/",
+ "monitor": "AUC",
+ "monitor_mode": "max",
+ "net_regularizer": "0",
+ "num_workers": "3",
+ "optimizer": "adam",
+ "patience": "2",
+ "pickle_feature_encoder": "True",
+ "save_best_only": "True",
+ "seed": "2021",
+ "shuffle": "True",
+ "task": "binary_classification",
+ "test_data": "../data/Criteo/Criteo_x1/test.csv",
+ "top_mlp_activations": "ReLU",
+ "top_mlp_dropout": "0.2",
+ "top_mlp_units": "[400, 400, 400]",
+ "train_data": "../data/Criteo/Criteo_x1/train.csv",
+ "use_hdf5": "True",
+ "valid_data": "../data/Criteo/Criteo_x1/valid.csv",
+ "verbose": "0",
+ "version": "pytorch"
+}
+2022-05-29 15:05:50,295 P7851 INFO Set up feature encoder...
+2022-05-29 15:05:50,295 P7851 INFO Load feature_map from json: ../data/Criteo/criteo_x1_7b681156/feature_map.json
+2022-05-29 15:05:50,296 P7851 INFO Loading data...
+2022-05-29 15:05:50,297 P7851 INFO Loading data from h5: ../data/Criteo/criteo_x1_7b681156/train.h5
+2022-05-29 15:05:55,450 P7851 INFO Loading data from h5: ../data/Criteo/criteo_x1_7b681156/valid.h5
+2022-05-29 15:05:56,805 P7851 INFO Train samples: total/33003326, pos/8456369, neg/24546957, ratio/25.62%, blocks/1
+2022-05-29 15:05:56,805 P7851 INFO Validation samples: total/8250124, pos/2114300, neg/6135824, ratio/25.63%, blocks/1
+2022-05-29 15:05:56,805 P7851 INFO Loading train data done.
+2022-05-29 15:06:02,843 P7851 INFO Total number of parameters: 21664241.
+2022-05-29 15:06:02,843 P7851 INFO Start training: 8058 batches/epoch
+2022-05-29 15:06:02,844 P7851 INFO ************ Epoch=1 start ************
+2022-05-29 15:21:26,950 P7851 INFO [Metrics] AUC: 0.804187 - logloss: 0.447040
+2022-05-29 15:21:26,952 P7851 INFO Save best model: monitor(max): 0.804187
+2022-05-29 15:21:27,059 P7851 INFO --- 8058/8058 batches finished ---
+2022-05-29 15:21:27,104 P7851 INFO Train loss: 0.461660
+2022-05-29 15:21:27,104 P7851 INFO ************ Epoch=1 end ************
+2022-05-29 15:36:49,337 P7851 INFO [Metrics] AUC: 0.806524 - logloss: 0.445125
+2022-05-29 15:36:49,339 P7851 INFO Save best model: monitor(max): 0.806524
+2022-05-29 15:36:49,463 P7851 INFO --- 8058/8058 batches finished ---
+2022-05-29 15:36:49,502 P7851 INFO Train loss: 0.456277
+2022-05-29 15:36:49,503 P7851 INFO ************ Epoch=2 end ************
+2022-05-29 15:52:12,386 P7851 INFO [Metrics] AUC: 0.807615 - logloss: 0.443908
+2022-05-29 15:52:12,387 P7851 INFO Save best model: monitor(max): 0.807615
+2022-05-29 15:52:12,513 P7851 INFO --- 8058/8058 batches finished ---
+2022-05-29 15:52:12,573 P7851 INFO Train loss: 0.454813
+2022-05-29 15:52:12,573 P7851 INFO ************ Epoch=3 end ************
+2022-05-29 16:07:31,604 P7851 INFO [Metrics] AUC: 0.807956 - logloss: 0.443703
+2022-05-29 16:07:31,605 P7851 INFO Save best model: monitor(max): 0.807956
+2022-05-29 16:07:31,731 P7851 INFO --- 8058/8058 batches finished ---
+2022-05-29 16:07:31,779 P7851 INFO Train loss: 0.454118
+2022-05-29 16:07:31,779 P7851 INFO ************ Epoch=4 end ************
+2022-05-29 16:22:48,793 P7851 INFO [Metrics] AUC: 0.808651 - logloss: 0.442900
+2022-05-29 16:22:48,794 P7851 INFO Save best model: monitor(max): 0.808651
+2022-05-29 16:22:48,932 P7851 INFO --- 8058/8058 batches finished ---
+2022-05-29 16:22:48,978 P7851 INFO Train loss: 0.453736
+2022-05-29 16:22:48,978 P7851 INFO ************ Epoch=5 end ************
+2022-05-29 16:38:05,989 P7851 INFO [Metrics] AUC: 0.808663 - logloss: 0.442960
+2022-05-29 16:38:05,990 P7851 INFO Save best model: monitor(max): 0.808663
+2022-05-29 16:38:06,115 P7851 INFO --- 8058/8058 batches finished ---
+2022-05-29 16:38:06,159 P7851 INFO Train loss: 0.453420
+2022-05-29 16:38:06,160 P7851 INFO ************ Epoch=6 end ************
+2022-05-29 16:53:22,282 P7851 INFO [Metrics] AUC: 0.809122 - logloss: 0.442572
+2022-05-29 16:53:22,284 P7851 INFO Save best model: monitor(max): 0.809122
+2022-05-29 16:53:22,386 P7851 INFO --- 8058/8058 batches finished ---
+2022-05-29 16:53:22,425 P7851 INFO Train loss: 0.453186
+2022-05-29 16:53:22,425 P7851 INFO ************ Epoch=7 end ************
+2022-05-29 17:08:38,105 P7851 INFO [Metrics] AUC: 0.809201 - logloss: 0.442405
+2022-05-29 17:08:38,106 P7851 INFO Save best model: monitor(max): 0.809201
+2022-05-29 17:08:38,207 P7851 INFO --- 8058/8058 batches finished ---
+2022-05-29 17:08:38,245 P7851 INFO Train loss: 0.453003
+2022-05-29 17:08:38,246 P7851 INFO ************ Epoch=8 end ************
+2022-05-29 17:23:54,309 P7851 INFO [Metrics] AUC: 0.809330 - logloss: 0.442346
+2022-05-29 17:23:54,310 P7851 INFO Save best model: monitor(max): 0.809330
+2022-05-29 17:23:54,410 P7851 INFO --- 8058/8058 batches finished ---
+2022-05-29 17:23:54,448 P7851 INFO Train loss: 0.452831
+2022-05-29 17:23:54,448 P7851 INFO ************ Epoch=9 end ************
+2022-05-29 17:39:10,920 P7851 INFO [Metrics] AUC: 0.809664 - logloss: 0.441991
+2022-05-29 17:39:10,922 P7851 INFO Save best model: monitor(max): 0.809664
+2022-05-29 17:39:11,024 P7851 INFO --- 8058/8058 batches finished ---
+2022-05-29 17:39:11,062 P7851 INFO Train loss: 0.452682
+2022-05-29 17:39:11,062 P7851 INFO ************ Epoch=10 end ************
+2022-05-29 17:54:24,245 P7851 INFO [Metrics] AUC: 0.809675 - logloss: 0.441967
+2022-05-29 17:54:24,246 P7851 INFO Save best model: monitor(max): 0.809675
+2022-05-29 17:54:24,355 P7851 INFO --- 8058/8058 batches finished ---
+2022-05-29 17:54:24,394 P7851 INFO Train loss: 0.452588
+2022-05-29 17:54:24,394 P7851 INFO ************ Epoch=11 end ************
+2022-05-29 18:09:40,265 P7851 INFO [Metrics] AUC: 0.809607 - logloss: 0.442167
+2022-05-29 18:09:40,266 P7851 INFO Monitor(max) STOP: 0.809607 !
+2022-05-29 18:09:40,267 P7851 INFO Reduce learning rate on plateau: 0.000100
+2022-05-29 18:09:40,267 P7851 INFO --- 8058/8058 batches finished ---
+2022-05-29 18:09:40,307 P7851 INFO Train loss: 0.452451
+2022-05-29 18:09:40,307 P7851 INFO ************ Epoch=12 end ************
+2022-05-29 18:17:07,207 P7851 INFO [Metrics] AUC: 0.812970 - logloss: 0.438972
+2022-05-29 18:17:07,208 P7851 INFO Save best model: monitor(max): 0.812970
+2022-05-29 18:17:07,302 P7851 INFO --- 8058/8058 batches finished ---
+2022-05-29 18:17:07,340 P7851 INFO Train loss: 0.441606
+2022-05-29 18:17:07,341 P7851 INFO ************ Epoch=13 end ************
+2022-05-29 18:24:30,663 P7851 INFO [Metrics] AUC: 0.813400 - logloss: 0.438582
+2022-05-29 18:24:30,665 P7851 INFO Save best model: monitor(max): 0.813400
+2022-05-29 18:24:30,772 P7851 INFO --- 8058/8058 batches finished ---
+2022-05-29 18:24:30,823 P7851 INFO Train loss: 0.437457
+2022-05-29 18:24:30,823 P7851 INFO ************ Epoch=14 end ************
+2022-05-29 18:31:50,965 P7851 INFO [Metrics] AUC: 0.813497 - logloss: 0.438596
+2022-05-29 18:31:50,966 P7851 INFO Save best model: monitor(max): 0.813497
+2022-05-29 18:31:51,061 P7851 INFO --- 8058/8058 batches finished ---
+2022-05-29 18:31:51,101 P7851 INFO Train loss: 0.435468
+2022-05-29 18:31:51,101 P7851 INFO ************ Epoch=15 end ************
+2022-05-29 18:39:13,073 P7851 INFO [Metrics] AUC: 0.813298 - logloss: 0.438926
+2022-05-29 18:39:13,075 P7851 INFO Monitor(max) STOP: 0.813298 !
+2022-05-29 18:39:13,075 P7851 INFO Reduce learning rate on plateau: 0.000010
+2022-05-29 18:39:13,075 P7851 INFO --- 8058/8058 batches finished ---
+2022-05-29 18:39:13,115 P7851 INFO Train loss: 0.433854
+2022-05-29 18:39:13,115 P7851 INFO ************ Epoch=16 end ************
+2022-05-29 18:46:31,049 P7851 INFO [Metrics] AUC: 0.812458 - logloss: 0.440054
+2022-05-29 18:46:31,051 P7851 INFO Monitor(max) STOP: 0.812458 !
+2022-05-29 18:46:31,051 P7851 INFO Reduce learning rate on plateau: 0.000001
+2022-05-29 18:46:31,051 P7851 INFO Early stopping at epoch=17
+2022-05-29 18:46:31,051 P7851 INFO --- 8058/8058 batches finished ---
+2022-05-29 18:46:31,090 P7851 INFO Train loss: 0.428564
+2022-05-29 18:46:31,090 P7851 INFO Training finished.
+2022-05-29 18:46:31,090 P7851 INFO Load best model: /cache/FuxiCTR/benchmarks/Criteo/DLRM_criteo_x1/criteo_x1_7b681156/DLRM_criteo_x1_001_4d897285.model
+2022-05-29 18:46:31,168 P7851 INFO ****** Validation evaluation ******
+2022-05-29 18:46:55,691 P7851 INFO [Metrics] AUC: 0.813497 - logloss: 0.438596
+2022-05-29 18:46:55,768 P7851 INFO ******** Test evaluation ********
+2022-05-29 18:46:55,768 P7851 INFO Loading data...
+2022-05-29 18:46:55,768 P7851 INFO Loading data from h5: ../data/Criteo/criteo_x1_7b681156/test.h5
+2022-05-29 18:46:56,559 P7851 INFO Test samples: total/4587167, pos/1174769, neg/3412398, ratio/25.61%, blocks/1
+2022-05-29 18:46:56,559 P7851 INFO Loading test data done.
+2022-05-29 18:47:11,210 P7851 INFO [Metrics] AUC: 0.813804 - logloss: 0.438155
+
+```
diff --git a/ranking/ctr/DLRM/DLRM_frappe_x1/README.md b/ranking/ctr/DLRM/DLRM_frappe_x1/README.md
index d5498445..50f1e10b 100644
--- a/ranking/ctr/DLRM/DLRM_frappe_x1/README.md
+++ b/ranking/ctr/DLRM/DLRM_frappe_x1/README.md
@@ -1,223 +1,223 @@
-## DLRM_frappe_x1
-
-A hands-on guide to run the DLRM model on the Frappe_x1 dataset.
-
-Author: [XUEPAI](https://github.com/xue-pai)
-
-### Index
-[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
-
-### Environments
-+ Hardware
-
- ```python
- CPU: Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.6GHz
- GPU: Tesla P100 16G
- RAM: 755G
-
- ```
-
-+ Software
-
- ```python
- CUDA: 11.4
- python: 3.6.5
- pytorch: 1.0.1.post2
- pandas: 0.23.0
- numpy: 1.18.1
- scipy: 1.1.0
- sklearn: 0.23.1
- pyyaml: 5.1
- h5py: 2.7.1
- tqdm: 4.59.0
- fuxictr: 1.2.1
- ```
-
-### Dataset
-Dataset ID: [Frappe_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Frappe#Frappe_x1). Please refer to the dataset details to get data ready.
-
-### Code
-
-We use [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [DLRM](https://github.com/xue-pai/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/DLRM.py).
-
-Running steps:
-
-1. Download [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
-
- ```python
- sys.path.append('YOUR_PATH_TO_FuxiCTR/')
- ```
-
-2. Create a data directory and put the downloaded csv files in `../data/Frappe/Frappe_x1`.
-
-3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [DLRM_frappe_x1_tuner_config_02](./DLRM_frappe_x1_tuner_config_02). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
-
-4. Run the following script to start.
-
- ```bash
- cd DLRM_frappe_x1
- nohup python run_expid.py --config ./DLRM_frappe_x1_tuner_config_02 --expid DLRM_frappe_x1_006_216831a3 --gpu 0 > run.log &
- tail -f run.log
- ```
-
-### Results
-
-| AUC | logloss |
-|:--------------------:|:--------------------:|
-| 0.983995 | 0.144441 |
-
-
-### Logs
-```python
-2022-05-27 14:35:07,576 P18844 INFO {
- "batch_norm": "True",
- "batch_size": "4096",
- "bottom_mlp_activations": "ReLU",
- "bottom_mlp_dropout": "0",
- "bottom_mlp_units": "None",
- "data_format": "csv",
- "data_root": "../data/Frappe/",
- "dataset_id": "frappe_x1_04e961e9",
- "debug": "False",
- "embedding_dim": "10",
- "embedding_regularizer": "0.1",
- "epochs": "100",
- "every_x_epochs": "1",
- "feature_cols": "[{'active': True, 'dtype': 'float', 'name': ['user', 'item', 'daytime', 'weekday', 'isweekend', 'homework', 'cost', 'weather', 'country', 'city'], 'type': 'categorical'}]",
- "gpu": "0",
- "interaction_op": "cat",
- "label_col": "{'dtype': 'float', 'name': 'label'}",
- "learning_rate": "0.001",
- "loss": "binary_crossentropy",
- "metrics": "['AUC', 'logloss']",
- "min_categr_count": "1",
- "model": "DLRM",
- "model_id": "DLRM_frappe_x1_006_216831a3",
- "model_root": "./Frappe/DLRM_frappe_x1/",
- "monitor": "AUC",
- "monitor_mode": "max",
- "net_regularizer": "0",
- "num_workers": "3",
- "optimizer": "adam",
- "patience": "2",
- "pickle_feature_encoder": "True",
- "save_best_only": "True",
- "seed": "2021",
- "shuffle": "True",
- "task": "binary_classification",
- "test_data": "../data/Frappe/Frappe_x1/test.csv",
- "top_mlp_activations": "ReLU",
- "top_mlp_dropout": "0.4",
- "top_mlp_units": "[400, 400, 400]",
- "train_data": "../data/Frappe/Frappe_x1/train.csv",
- "use_hdf5": "True",
- "valid_data": "../data/Frappe/Frappe_x1/valid.csv",
- "verbose": "1",
- "version": "pytorch"
-}
-2022-05-27 14:35:07,576 P18844 INFO Set up feature encoder...
-2022-05-27 14:35:07,576 P18844 INFO Load feature_map from json: ../data/Frappe/frappe_x1_04e961e9/feature_map.json
-2022-05-27 14:35:07,576 P18844 INFO Loading data...
-2022-05-27 14:35:07,578 P18844 INFO Loading data from h5: ../data/Frappe/frappe_x1_04e961e9/train.h5
-2022-05-27 14:35:07,589 P18844 INFO Loading data from h5: ../data/Frappe/frappe_x1_04e961e9/valid.h5
-2022-05-27 14:35:07,593 P18844 INFO Train samples: total/202027, pos/67604, neg/134423, ratio/33.46%, blocks/1
-2022-05-27 14:35:07,593 P18844 INFO Validation samples: total/57722, pos/19063, neg/38659, ratio/33.03%, blocks/1
-2022-05-27 14:35:07,593 P18844 INFO Loading train data done.
-2022-05-27 14:35:10,767 P18844 INFO Total number of parameters: 417891.
-2022-05-27 14:35:10,767 P18844 INFO Start training: 50 batches/epoch
-2022-05-27 14:35:10,767 P18844 INFO ************ Epoch=1 start ************
-2022-05-27 14:35:14,808 P18844 INFO [Metrics] AUC: 0.934844 - logloss: 0.634514
-2022-05-27 14:35:14,809 P18844 INFO Save best model: monitor(max): 0.934844
-2022-05-27 14:35:14,815 P18844 INFO --- 50/50 batches finished ---
-2022-05-27 14:35:14,899 P18844 INFO Train loss: 0.415569
-2022-05-27 14:35:14,899 P18844 INFO ************ Epoch=1 end ************
-2022-05-27 14:35:18,769 P18844 INFO [Metrics] AUC: 0.946014 - logloss: 0.279160
-2022-05-27 14:35:18,770 P18844 INFO Save best model: monitor(max): 0.946014
-2022-05-27 14:35:18,776 P18844 INFO --- 50/50 batches finished ---
-2022-05-27 14:35:18,841 P18844 INFO Train loss: 0.308836
-2022-05-27 14:35:18,841 P18844 INFO ************ Epoch=2 end ************
-2022-05-27 14:35:22,490 P18844 INFO [Metrics] AUC: 0.962075 - logloss: 0.224911
-2022-05-27 14:35:22,491 P18844 INFO Save best model: monitor(max): 0.962075
-2022-05-27 14:35:22,496 P18844 INFO --- 50/50 batches finished ---
-2022-05-27 14:35:22,548 P18844 INFO Train loss: 0.276737
-2022-05-27 14:35:22,548 P18844 INFO ************ Epoch=3 end ************
-2022-05-27 14:35:25,881 P18844 INFO [Metrics] AUC: 0.969775 - logloss: 0.233633
-2022-05-27 14:35:25,882 P18844 INFO Save best model: monitor(max): 0.969775
-2022-05-27 14:35:25,886 P18844 INFO --- 50/50 batches finished ---
-2022-05-27 14:35:25,936 P18844 INFO Train loss: 0.258146
-2022-05-27 14:35:25,936 P18844 INFO ************ Epoch=4 end ************
-2022-05-27 14:35:29,183 P18844 INFO [Metrics] AUC: 0.972859 - logloss: 0.188215
-2022-05-27 14:35:29,184 P18844 INFO Save best model: monitor(max): 0.972859
-2022-05-27 14:35:29,189 P18844 INFO --- 50/50 batches finished ---
-2022-05-27 14:35:29,240 P18844 INFO Train loss: 0.247808
-2022-05-27 14:35:29,240 P18844 INFO ************ Epoch=5 end ************
-2022-05-27 14:35:32,574 P18844 INFO [Metrics] AUC: 0.974283 - logloss: 0.190274
-2022-05-27 14:35:32,575 P18844 INFO Save best model: monitor(max): 0.974283
-2022-05-27 14:35:32,581 P18844 INFO --- 50/50 batches finished ---
-2022-05-27 14:35:32,640 P18844 INFO Train loss: 0.240821
-2022-05-27 14:35:32,640 P18844 INFO ************ Epoch=6 end ************
-2022-05-27 14:35:36,034 P18844 INFO [Metrics] AUC: 0.975577 - logloss: 0.179150
-2022-05-27 14:35:36,035 P18844 INFO Save best model: monitor(max): 0.975577
-2022-05-27 14:35:36,039 P18844 INFO --- 50/50 batches finished ---
-2022-05-27 14:35:36,076 P18844 INFO Train loss: 0.236149
-2022-05-27 14:35:36,076 P18844 INFO ************ Epoch=7 end ************
-2022-05-27 14:35:39,469 P18844 INFO [Metrics] AUC: 0.975064 - logloss: 0.210407
-2022-05-27 14:35:39,470 P18844 INFO Monitor(max) STOP: 0.975064 !
-2022-05-27 14:35:39,470 P18844 INFO Reduce learning rate on plateau: 0.000100
-2022-05-27 14:35:39,470 P18844 INFO --- 50/50 batches finished ---
-2022-05-27 14:35:39,513 P18844 INFO Train loss: 0.230933
-2022-05-27 14:35:39,513 P18844 INFO ************ Epoch=8 end ************
-2022-05-27 14:35:42,838 P18844 INFO [Metrics] AUC: 0.982071 - logloss: 0.147709
-2022-05-27 14:35:42,839 P18844 INFO Save best model: monitor(max): 0.982071
-2022-05-27 14:35:42,843 P18844 INFO --- 50/50 batches finished ---
-2022-05-27 14:35:42,893 P18844 INFO Train loss: 0.196917
-2022-05-27 14:35:42,893 P18844 INFO ************ Epoch=9 end ************
-2022-05-27 14:35:46,409 P18844 INFO [Metrics] AUC: 0.983588 - logloss: 0.141859
-2022-05-27 14:35:46,409 P18844 INFO Save best model: monitor(max): 0.983588
-2022-05-27 14:35:46,414 P18844 INFO --- 50/50 batches finished ---
-2022-05-27 14:35:46,451 P18844 INFO Train loss: 0.161139
-2022-05-27 14:35:46,451 P18844 INFO ************ Epoch=10 end ************
-2022-05-27 14:35:50,082 P18844 INFO [Metrics] AUC: 0.984340 - logloss: 0.139732
-2022-05-27 14:35:50,082 P18844 INFO Save best model: monitor(max): 0.984340
-2022-05-27 14:35:50,088 P18844 INFO --- 50/50 batches finished ---
-2022-05-27 14:35:50,130 P18844 INFO Train loss: 0.138565
-2022-05-27 14:35:50,130 P18844 INFO ************ Epoch=11 end ************
-2022-05-27 14:35:53,918 P18844 INFO [Metrics] AUC: 0.984674 - logloss: 0.139663
-2022-05-27 14:35:53,919 P18844 INFO Save best model: monitor(max): 0.984674
-2022-05-27 14:35:53,923 P18844 INFO --- 50/50 batches finished ---
-2022-05-27 14:35:53,974 P18844 INFO Train loss: 0.122550
-2022-05-27 14:35:53,974 P18844 INFO ************ Epoch=12 end ************
-2022-05-27 14:35:57,709 P18844 INFO [Metrics] AUC: 0.984839 - logloss: 0.140914
-2022-05-27 14:35:57,709 P18844 INFO Save best model: monitor(max): 0.984839
-2022-05-27 14:35:57,717 P18844 INFO --- 50/50 batches finished ---
-2022-05-27 14:35:57,757 P18844 INFO Train loss: 0.112116
-2022-05-27 14:35:57,757 P18844 INFO ************ Epoch=13 end ************
-2022-05-27 14:36:01,512 P18844 INFO [Metrics] AUC: 0.985190 - logloss: 0.139815
-2022-05-27 14:36:01,513 P18844 INFO Save best model: monitor(max): 0.985190
-2022-05-27 14:36:01,518 P18844 INFO --- 50/50 batches finished ---
-2022-05-27 14:36:01,556 P18844 INFO Train loss: 0.103366
-2022-05-27 14:36:01,556 P18844 INFO ************ Epoch=14 end ************
-2022-05-27 14:36:03,847 P18844 INFO [Metrics] AUC: 0.984822 - logloss: 0.143240
-2022-05-27 14:36:03,848 P18844 INFO Monitor(max) STOP: 0.984822 !
-2022-05-27 14:36:03,848 P18844 INFO Reduce learning rate on plateau: 0.000010
-2022-05-27 14:36:03,848 P18844 INFO --- 50/50 batches finished ---
-2022-05-27 14:36:03,886 P18844 INFO Train loss: 0.097550
-2022-05-27 14:36:03,886 P18844 INFO ************ Epoch=15 end ************
-2022-05-27 14:36:05,562 P18844 INFO [Metrics] AUC: 0.984737 - logloss: 0.144866
-2022-05-27 14:36:05,563 P18844 INFO Monitor(max) STOP: 0.984737 !
-2022-05-27 14:36:05,563 P18844 INFO Reduce learning rate on plateau: 0.000001
-2022-05-27 14:36:05,563 P18844 INFO Early stopping at epoch=16
-2022-05-27 14:36:05,563 P18844 INFO --- 50/50 batches finished ---
-2022-05-27 14:36:05,601 P18844 INFO Train loss: 0.088039
-2022-05-27 14:36:05,601 P18844 INFO Training finished.
-2022-05-27 14:36:05,601 P18844 INFO Load best model: /home/XXX/FuxiCTR/benchmarks/Frappe/DLRM_frappe_x1/frappe_x1_04e961e9/DLRM_frappe_x1_006_216831a3.model
-2022-05-27 14:36:05,610 P18844 INFO ****** Validation evaluation ******
-2022-05-27 14:36:05,922 P18844 INFO [Metrics] AUC: 0.985190 - logloss: 0.139815
-2022-05-27 14:36:05,961 P18844 INFO ******** Test evaluation ********
-2022-05-27 14:36:05,962 P18844 INFO Loading data...
-2022-05-27 14:36:05,962 P18844 INFO Loading data from h5: ../data/Frappe/frappe_x1_04e961e9/test.h5
-2022-05-27 14:36:05,965 P18844 INFO Test samples: total/28860, pos/9536, neg/19324, ratio/33.04%, blocks/1
-2022-05-27 14:36:05,965 P18844 INFO Loading test data done.
-2022-05-27 14:36:06,205 P18844 INFO [Metrics] AUC: 0.983995 - logloss: 0.144441
-
-```
+## DLRM_frappe_x1
+
+A hands-on guide to run the DLRM model on the Frappe_x1 dataset.
+
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
+
+### Index
+[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
+
+### Environments
++ Hardware
+
+ ```python
+ CPU: Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.6GHz
+ GPU: Tesla P100 16G
+ RAM: 755G
+
+ ```
+
++ Software
+
+ ```python
+ CUDA: 11.4
+ python: 3.6.5
+ pytorch: 1.0.1.post2
+ pandas: 0.23.0
+ numpy: 1.18.1
+ scipy: 1.1.0
+ sklearn: 0.23.1
+ pyyaml: 5.1
+ h5py: 2.7.1
+ tqdm: 4.59.0
+ fuxictr: 1.2.1
+ ```
+
+### Dataset
+Dataset ID: [Frappe_x1](https://github.com/reczoo/Datasets/tree/main/Frappe/Frappe_x1). Please refer to the dataset details to get data ready.
+
+### Code
+
+We use [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [DLRM](https://github.com/reczoo/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/DLRM.py).
+
+Running steps:
+
+1. Download [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+
+ ```python
+ sys.path.append('YOUR_PATH_TO_FuxiCTR/')
+ ```
+
+2. Create a data directory and put the downloaded csv files in `../data/Frappe/Frappe_x1`.
+
+3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [DLRM_frappe_x1_tuner_config_02](./DLRM_frappe_x1_tuner_config_02). Make sure the data paths in `dataset_config.yaml` are correctly set to what we created in the previous step.
+
+4. Run the following script to start.
+
+ ```bash
+ cd DLRM_frappe_x1
+ nohup python run_expid.py --config ./DLRM_frappe_x1_tuner_config_02 --expid DLRM_frappe_x1_006_216831a3 --gpu 0 > run.log &
+ tail -f run.log
+ ```
+
+### Results
+
+| AUC | logloss |
+|:--------------------:|:--------------------:|
+| 0.983995 | 0.144441 |
+
+
+### Logs
+```python
+2022-05-27 14:35:07,576 P18844 INFO {
+ "batch_norm": "True",
+ "batch_size": "4096",
+ "bottom_mlp_activations": "ReLU",
+ "bottom_mlp_dropout": "0",
+ "bottom_mlp_units": "None",
+ "data_format": "csv",
+ "data_root": "../data/Frappe/",
+ "dataset_id": "frappe_x1_04e961e9",
+ "debug": "False",
+ "embedding_dim": "10",
+ "embedding_regularizer": "0.1",
+ "epochs": "100",
+ "every_x_epochs": "1",
+ "feature_cols": "[{'active': True, 'dtype': 'float', 'name': ['user', 'item', 'daytime', 'weekday', 'isweekend', 'homework', 'cost', 'weather', 'country', 'city'], 'type': 'categorical'}]",
+ "gpu": "0",
+ "interaction_op": "cat",
+ "label_col": "{'dtype': 'float', 'name': 'label'}",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "metrics": "['AUC', 'logloss']",
+ "min_categr_count": "1",
+ "model": "DLRM",
+ "model_id": "DLRM_frappe_x1_006_216831a3",
+ "model_root": "./Frappe/DLRM_frappe_x1/",
+ "monitor": "AUC",
+ "monitor_mode": "max",
+ "net_regularizer": "0",
+ "num_workers": "3",
+ "optimizer": "adam",
+ "patience": "2",
+ "pickle_feature_encoder": "True",
+ "save_best_only": "True",
+ "seed": "2021",
+ "shuffle": "True",
+ "task": "binary_classification",
+ "test_data": "../data/Frappe/Frappe_x1/test.csv",
+ "top_mlp_activations": "ReLU",
+ "top_mlp_dropout": "0.4",
+ "top_mlp_units": "[400, 400, 400]",
+ "train_data": "../data/Frappe/Frappe_x1/train.csv",
+ "use_hdf5": "True",
+ "valid_data": "../data/Frappe/Frappe_x1/valid.csv",
+ "verbose": "1",
+ "version": "pytorch"
+}
+2022-05-27 14:35:07,576 P18844 INFO Set up feature encoder...
+2022-05-27 14:35:07,576 P18844 INFO Load feature_map from json: ../data/Frappe/frappe_x1_04e961e9/feature_map.json
+2022-05-27 14:35:07,576 P18844 INFO Loading data...
+2022-05-27 14:35:07,578 P18844 INFO Loading data from h5: ../data/Frappe/frappe_x1_04e961e9/train.h5
+2022-05-27 14:35:07,589 P18844 INFO Loading data from h5: ../data/Frappe/frappe_x1_04e961e9/valid.h5
+2022-05-27 14:35:07,593 P18844 INFO Train samples: total/202027, pos/67604, neg/134423, ratio/33.46%, blocks/1
+2022-05-27 14:35:07,593 P18844 INFO Validation samples: total/57722, pos/19063, neg/38659, ratio/33.03%, blocks/1
+2022-05-27 14:35:07,593 P18844 INFO Loading train data done.
+2022-05-27 14:35:10,767 P18844 INFO Total number of parameters: 417891.
+2022-05-27 14:35:10,767 P18844 INFO Start training: 50 batches/epoch
+2022-05-27 14:35:10,767 P18844 INFO ************ Epoch=1 start ************
+2022-05-27 14:35:14,808 P18844 INFO [Metrics] AUC: 0.934844 - logloss: 0.634514
+2022-05-27 14:35:14,809 P18844 INFO Save best model: monitor(max): 0.934844
+2022-05-27 14:35:14,815 P18844 INFO --- 50/50 batches finished ---
+2022-05-27 14:35:14,899 P18844 INFO Train loss: 0.415569
+2022-05-27 14:35:14,899 P18844 INFO ************ Epoch=1 end ************
+2022-05-27 14:35:18,769 P18844 INFO [Metrics] AUC: 0.946014 - logloss: 0.279160
+2022-05-27 14:35:18,770 P18844 INFO Save best model: monitor(max): 0.946014
+2022-05-27 14:35:18,776 P18844 INFO --- 50/50 batches finished ---
+2022-05-27 14:35:18,841 P18844 INFO Train loss: 0.308836
+2022-05-27 14:35:18,841 P18844 INFO ************ Epoch=2 end ************
+2022-05-27 14:35:22,490 P18844 INFO [Metrics] AUC: 0.962075 - logloss: 0.224911
+2022-05-27 14:35:22,491 P18844 INFO Save best model: monitor(max): 0.962075
+2022-05-27 14:35:22,496 P18844 INFO --- 50/50 batches finished ---
+2022-05-27 14:35:22,548 P18844 INFO Train loss: 0.276737
+2022-05-27 14:35:22,548 P18844 INFO ************ Epoch=3 end ************
+2022-05-27 14:35:25,881 P18844 INFO [Metrics] AUC: 0.969775 - logloss: 0.233633
+2022-05-27 14:35:25,882 P18844 INFO Save best model: monitor(max): 0.969775
+2022-05-27 14:35:25,886 P18844 INFO --- 50/50 batches finished ---
+2022-05-27 14:35:25,936 P18844 INFO Train loss: 0.258146
+2022-05-27 14:35:25,936 P18844 INFO ************ Epoch=4 end ************
+2022-05-27 14:35:29,183 P18844 INFO [Metrics] AUC: 0.972859 - logloss: 0.188215
+2022-05-27 14:35:29,184 P18844 INFO Save best model: monitor(max): 0.972859
+2022-05-27 14:35:29,189 P18844 INFO --- 50/50 batches finished ---
+2022-05-27 14:35:29,240 P18844 INFO Train loss: 0.247808
+2022-05-27 14:35:29,240 P18844 INFO ************ Epoch=5 end ************
+2022-05-27 14:35:32,574 P18844 INFO [Metrics] AUC: 0.974283 - logloss: 0.190274
+2022-05-27 14:35:32,575 P18844 INFO Save best model: monitor(max): 0.974283
+2022-05-27 14:35:32,581 P18844 INFO --- 50/50 batches finished ---
+2022-05-27 14:35:32,640 P18844 INFO Train loss: 0.240821
+2022-05-27 14:35:32,640 P18844 INFO ************ Epoch=6 end ************
+2022-05-27 14:35:36,034 P18844 INFO [Metrics] AUC: 0.975577 - logloss: 0.179150
+2022-05-27 14:35:36,035 P18844 INFO Save best model: monitor(max): 0.975577
+2022-05-27 14:35:36,039 P18844 INFO --- 50/50 batches finished ---
+2022-05-27 14:35:36,076 P18844 INFO Train loss: 0.236149
+2022-05-27 14:35:36,076 P18844 INFO ************ Epoch=7 end ************
+2022-05-27 14:35:39,469 P18844 INFO [Metrics] AUC: 0.975064 - logloss: 0.210407
+2022-05-27 14:35:39,470 P18844 INFO Monitor(max) STOP: 0.975064 !
+2022-05-27 14:35:39,470 P18844 INFO Reduce learning rate on plateau: 0.000100
+2022-05-27 14:35:39,470 P18844 INFO --- 50/50 batches finished ---
+2022-05-27 14:35:39,513 P18844 INFO Train loss: 0.230933
+2022-05-27 14:35:39,513 P18844 INFO ************ Epoch=8 end ************
+2022-05-27 14:35:42,838 P18844 INFO [Metrics] AUC: 0.982071 - logloss: 0.147709
+2022-05-27 14:35:42,839 P18844 INFO Save best model: monitor(max): 0.982071
+2022-05-27 14:35:42,843 P18844 INFO --- 50/50 batches finished ---
+2022-05-27 14:35:42,893 P18844 INFO Train loss: 0.196917
+2022-05-27 14:35:42,893 P18844 INFO ************ Epoch=9 end ************
+2022-05-27 14:35:46,409 P18844 INFO [Metrics] AUC: 0.983588 - logloss: 0.141859
+2022-05-27 14:35:46,409 P18844 INFO Save best model: monitor(max): 0.983588
+2022-05-27 14:35:46,414 P18844 INFO --- 50/50 batches finished ---
+2022-05-27 14:35:46,451 P18844 INFO Train loss: 0.161139
+2022-05-27 14:35:46,451 P18844 INFO ************ Epoch=10 end ************
+2022-05-27 14:35:50,082 P18844 INFO [Metrics] AUC: 0.984340 - logloss: 0.139732
+2022-05-27 14:35:50,082 P18844 INFO Save best model: monitor(max): 0.984340
+2022-05-27 14:35:50,088 P18844 INFO --- 50/50 batches finished ---
+2022-05-27 14:35:50,130 P18844 INFO Train loss: 0.138565
+2022-05-27 14:35:50,130 P18844 INFO ************ Epoch=11 end ************
+2022-05-27 14:35:53,918 P18844 INFO [Metrics] AUC: 0.984674 - logloss: 0.139663
+2022-05-27 14:35:53,919 P18844 INFO Save best model: monitor(max): 0.984674
+2022-05-27 14:35:53,923 P18844 INFO --- 50/50 batches finished ---
+2022-05-27 14:35:53,974 P18844 INFO Train loss: 0.122550
+2022-05-27 14:35:53,974 P18844 INFO ************ Epoch=12 end ************
+2022-05-27 14:35:57,709 P18844 INFO [Metrics] AUC: 0.984839 - logloss: 0.140914
+2022-05-27 14:35:57,709 P18844 INFO Save best model: monitor(max): 0.984839
+2022-05-27 14:35:57,717 P18844 INFO --- 50/50 batches finished ---
+2022-05-27 14:35:57,757 P18844 INFO Train loss: 0.112116
+2022-05-27 14:35:57,757 P18844 INFO ************ Epoch=13 end ************
+2022-05-27 14:36:01,512 P18844 INFO [Metrics] AUC: 0.985190 - logloss: 0.139815
+2022-05-27 14:36:01,513 P18844 INFO Save best model: monitor(max): 0.985190
+2022-05-27 14:36:01,518 P18844 INFO --- 50/50 batches finished ---
+2022-05-27 14:36:01,556 P18844 INFO Train loss: 0.103366
+2022-05-27 14:36:01,556 P18844 INFO ************ Epoch=14 end ************
+2022-05-27 14:36:03,847 P18844 INFO [Metrics] AUC: 0.984822 - logloss: 0.143240
+2022-05-27 14:36:03,848 P18844 INFO Monitor(max) STOP: 0.984822 !
+2022-05-27 14:36:03,848 P18844 INFO Reduce learning rate on plateau: 0.000010
+2022-05-27 14:36:03,848 P18844 INFO --- 50/50 batches finished ---
+2022-05-27 14:36:03,886 P18844 INFO Train loss: 0.097550
+2022-05-27 14:36:03,886 P18844 INFO ************ Epoch=15 end ************
+2022-05-27 14:36:05,562 P18844 INFO [Metrics] AUC: 0.984737 - logloss: 0.144866
+2022-05-27 14:36:05,563 P18844 INFO Monitor(max) STOP: 0.984737 !
+2022-05-27 14:36:05,563 P18844 INFO Reduce learning rate on plateau: 0.000001
+2022-05-27 14:36:05,563 P18844 INFO Early stopping at epoch=16
+2022-05-27 14:36:05,563 P18844 INFO --- 50/50 batches finished ---
+2022-05-27 14:36:05,601 P18844 INFO Train loss: 0.088039
+2022-05-27 14:36:05,601 P18844 INFO Training finished.
+2022-05-27 14:36:05,601 P18844 INFO Load best model: /home/XXX/FuxiCTR/benchmarks/Frappe/DLRM_frappe_x1/frappe_x1_04e961e9/DLRM_frappe_x1_006_216831a3.model
+2022-05-27 14:36:05,610 P18844 INFO ****** Validation evaluation ******
+2022-05-27 14:36:05,922 P18844 INFO [Metrics] AUC: 0.985190 - logloss: 0.139815
+2022-05-27 14:36:05,961 P18844 INFO ******** Test evaluation ********
+2022-05-27 14:36:05,962 P18844 INFO Loading data...
+2022-05-27 14:36:05,962 P18844 INFO Loading data from h5: ../data/Frappe/frappe_x1_04e961e9/test.h5
+2022-05-27 14:36:05,965 P18844 INFO Test samples: total/28860, pos/9536, neg/19324, ratio/33.04%, blocks/1
+2022-05-27 14:36:05,965 P18844 INFO Loading test data done.
+2022-05-27 14:36:06,205 P18844 INFO [Metrics] AUC: 0.983995 - logloss: 0.144441
+
+```
diff --git a/ranking/ctr/DLRM/DLRM_movielenslatest_x1/README.md b/ranking/ctr/DLRM/DLRM_movielenslatest_x1/README.md
index ca8150dc..f62c0655 100644
--- a/ranking/ctr/DLRM/DLRM_movielenslatest_x1/README.md
+++ b/ranking/ctr/DLRM/DLRM_movielenslatest_x1/README.md
@@ -1,238 +1,238 @@
-## DLRM_movielenslatest_x1
-
-A hands-on guide to run the DLRM model on the MovielensLatest_x1 dataset.
-
-Author: [XUEPAI](https://github.com/xue-pai)
-
-### Index
-[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
-
-### Environments
-+ Hardware
-
- ```python
- CPU: Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.6GHz
- GPU: Tesla P100 16G
- RAM: 755G
-
- ```
-
-+ Software
-
- ```python
- CUDA: 11.4
- python: 3.6.5
- pytorch: 1.0.1.post2
- pandas: 0.23.0
- numpy: 1.18.1
- scipy: 1.1.0
- sklearn: 0.23.1
- pyyaml: 5.1
- h5py: 2.7.1
- tqdm: 4.59.0
- fuxictr: 1.2.1
- ```
-
-### Dataset
-Dataset ID: [MovielensLatest_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/MovieLens#MovielensLatest_x1). Please refer to the dataset details to get data ready.
-
-### Code
-
-We use [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [DLRM](https://github.com/xue-pai/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/DLRM.py).
-
-Running steps:
-
-1. Download [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
-
- ```python
- sys.path.append('YOUR_PATH_TO_FuxiCTR/')
- ```
-
-2. Create a data directory and put the downloaded csv files in `../data/Movielens/MovielensLatest_x1`.
-
-3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [DLRM_movielenslatest_x1_tuner_config_04](./DLRM_movielenslatest_x1_tuner_config_04). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
-
-4. Run the following script to start.
-
- ```bash
- cd DLRM_movielenslatest_x1
- nohup python run_expid.py --config ./DLRM_movielenslatest_x1_tuner_config_04 --expid DLRM_movielenslatest_x1_002_333e0a39 --gpu 0 > run.log &
- tail -f run.log
- ```
-
-### Results
-
-| AUC | logloss |
-|:--------------------:|:--------------------:|
-| 0.969071 | 0.214993 |
-
-
-### Logs
-```python
-2022-05-27 17:45:47,993 P56424 INFO {
- "batch_norm": "True",
- "batch_size": "4096",
- "bottom_mlp_activations": "ReLU",
- "bottom_mlp_dropout": "0",
- "bottom_mlp_units": "None",
- "data_format": "csv",
- "data_root": "../data/Movielens/",
- "dataset_id": "movielenslatest_x1_cd32d937",
- "debug": "False",
- "embedding_dim": "10",
- "embedding_regularizer": "0.01",
- "epochs": "100",
- "every_x_epochs": "1",
- "feature_cols": "[{'active': True, 'dtype': 'float', 'name': ['user_id', 'item_id', 'tag_id'], 'type': 'categorical'}]",
- "gpu": "1",
- "interaction_op": "cat",
- "label_col": "{'dtype': 'float', 'name': 'label'}",
- "learning_rate": "0.001",
- "loss": "binary_crossentropy",
- "metrics": "['AUC', 'logloss']",
- "min_categr_count": "1",
- "model": "DLRM",
- "model_id": "DLRM_movielenslatest_x1_002_333e0a39",
- "model_root": "./Movielens/DLRM_movielenslatest_x1/",
- "monitor": "AUC",
- "monitor_mode": "max",
- "net_regularizer": "0",
- "num_workers": "3",
- "optimizer": "adam",
- "patience": "2",
- "pickle_feature_encoder": "True",
- "save_best_only": "True",
- "seed": "2021",
- "shuffle": "True",
- "task": "binary_classification",
- "test_data": "../data/Movielens/MovielensLatest_x1/test.csv",
- "top_mlp_activations": "ReLU",
- "top_mlp_dropout": "0.2",
- "top_mlp_units": "[400, 400, 400]",
- "train_data": "../data/Movielens/MovielensLatest_x1/train.csv",
- "use_hdf5": "True",
- "valid_data": "../data/Movielens/MovielensLatest_x1/valid.csv",
- "verbose": "1",
- "version": "pytorch"
-}
-2022-05-27 17:45:47,994 P56424 INFO Set up feature encoder...
-2022-05-27 17:45:47,995 P56424 INFO Load feature_map from json: ../data/Movielens/movielenslatest_x1_cd32d937/feature_map.json
-2022-05-27 17:45:47,995 P56424 INFO Loading data...
-2022-05-27 17:45:48,004 P56424 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_cd32d937/train.h5
-2022-05-27 17:45:48,040 P56424 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_cd32d937/valid.h5
-2022-05-27 17:45:48,053 P56424 INFO Train samples: total/1404801, pos/467878, neg/936923, ratio/33.31%, blocks/1
-2022-05-27 17:45:48,053 P56424 INFO Validation samples: total/401372, pos/134225, neg/267147, ratio/33.44%, blocks/1
-2022-05-27 17:45:48,053 P56424 INFO Loading train data done.
-2022-05-27 17:45:50,966 P56424 INFO Total number of parameters: 1238391.
-2022-05-27 17:45:50,966 P56424 INFO Start training: 343 batches/epoch
-2022-05-27 17:45:50,966 P56424 INFO ************ Epoch=1 start ************
-2022-05-27 17:46:01,289 P56424 INFO [Metrics] AUC: 0.934995 - logloss: 0.291341
-2022-05-27 17:46:01,289 P56424 INFO Save best model: monitor(max): 0.934995
-2022-05-27 17:46:01,297 P56424 INFO --- 343/343 batches finished ---
-2022-05-27 17:46:01,354 P56424 INFO Train loss: 0.377448
-2022-05-27 17:46:01,354 P56424 INFO ************ Epoch=1 end ************
-2022-05-27 17:46:11,537 P56424 INFO [Metrics] AUC: 0.945625 - logloss: 0.265396
-2022-05-27 17:46:11,538 P56424 INFO Save best model: monitor(max): 0.945625
-2022-05-27 17:46:11,549 P56424 INFO --- 343/343 batches finished ---
-2022-05-27 17:46:11,595 P56424 INFO Train loss: 0.362417
-2022-05-27 17:46:11,595 P56424 INFO ************ Epoch=2 end ************
-2022-05-27 17:46:21,815 P56424 INFO [Metrics] AUC: 0.949287 - logloss: 0.253220
-2022-05-27 17:46:21,815 P56424 INFO Save best model: monitor(max): 0.949287
-2022-05-27 17:46:21,823 P56424 INFO --- 343/343 batches finished ---
-2022-05-27 17:46:21,858 P56424 INFO Train loss: 0.364127
-2022-05-27 17:46:21,858 P56424 INFO ************ Epoch=3 end ************
-2022-05-27 17:46:32,344 P56424 INFO [Metrics] AUC: 0.950830 - logloss: 0.248907
-2022-05-27 17:46:32,345 P56424 INFO Save best model: monitor(max): 0.950830
-2022-05-27 17:46:32,356 P56424 INFO --- 343/343 batches finished ---
-2022-05-27 17:46:32,417 P56424 INFO Train loss: 0.368697
-2022-05-27 17:46:32,417 P56424 INFO ************ Epoch=4 end ************
-2022-05-27 17:46:42,475 P56424 INFO [Metrics] AUC: 0.951979 - logloss: 0.244560
-2022-05-27 17:46:42,475 P56424 INFO Save best model: monitor(max): 0.951979
-2022-05-27 17:46:42,484 P56424 INFO --- 343/343 batches finished ---
-2022-05-27 17:46:42,533 P56424 INFO Train loss: 0.371929
-2022-05-27 17:46:42,533 P56424 INFO ************ Epoch=5 end ************
-2022-05-27 17:46:52,494 P56424 INFO [Metrics] AUC: 0.953028 - logloss: 0.246918
-2022-05-27 17:46:52,495 P56424 INFO Save best model: monitor(max): 0.953028
-2022-05-27 17:46:52,504 P56424 INFO --- 343/343 batches finished ---
-2022-05-27 17:46:52,541 P56424 INFO Train loss: 0.375755
-2022-05-27 17:46:52,541 P56424 INFO ************ Epoch=6 end ************
-2022-05-27 17:47:02,352 P56424 INFO [Metrics] AUC: 0.953333 - logloss: 0.242936
-2022-05-27 17:47:02,353 P56424 INFO Save best model: monitor(max): 0.953333
-2022-05-27 17:47:02,361 P56424 INFO --- 343/343 batches finished ---
-2022-05-27 17:47:02,408 P56424 INFO Train loss: 0.375239
-2022-05-27 17:47:02,408 P56424 INFO ************ Epoch=7 end ************
-2022-05-27 17:47:12,267 P56424 INFO [Metrics] AUC: 0.954451 - logloss: 0.242778
-2022-05-27 17:47:12,268 P56424 INFO Save best model: monitor(max): 0.954451
-2022-05-27 17:47:12,278 P56424 INFO --- 343/343 batches finished ---
-2022-05-27 17:47:12,320 P56424 INFO Train loss: 0.376126
-2022-05-27 17:47:12,320 P56424 INFO ************ Epoch=8 end ************
-2022-05-27 17:47:22,250 P56424 INFO [Metrics] AUC: 0.955334 - logloss: 0.239819
-2022-05-27 17:47:22,251 P56424 INFO Save best model: monitor(max): 0.955334
-2022-05-27 17:47:22,258 P56424 INFO --- 343/343 batches finished ---
-2022-05-27 17:47:22,322 P56424 INFO Train loss: 0.377568
-2022-05-27 17:47:22,322 P56424 INFO ************ Epoch=9 end ************
-2022-05-27 17:47:32,017 P56424 INFO [Metrics] AUC: 0.955787 - logloss: 0.234790
-2022-05-27 17:47:32,018 P56424 INFO Save best model: monitor(max): 0.955787
-2022-05-27 17:47:32,025 P56424 INFO --- 343/343 batches finished ---
-2022-05-27 17:47:32,067 P56424 INFO Train loss: 0.376616
-2022-05-27 17:47:32,067 P56424 INFO ************ Epoch=10 end ************
-2022-05-27 17:47:42,021 P56424 INFO [Metrics] AUC: 0.956218 - logloss: 0.235922
-2022-05-27 17:47:42,022 P56424 INFO Save best model: monitor(max): 0.956218
-2022-05-27 17:47:42,033 P56424 INFO --- 343/343 batches finished ---
-2022-05-27 17:47:42,082 P56424 INFO Train loss: 0.376828
-2022-05-27 17:47:42,082 P56424 INFO ************ Epoch=11 end ************
-2022-05-27 17:47:48,606 P56424 INFO [Metrics] AUC: 0.956553 - logloss: 0.236383
-2022-05-27 17:47:48,607 P56424 INFO Save best model: monitor(max): 0.956553
-2022-05-27 17:47:48,615 P56424 INFO --- 343/343 batches finished ---
-2022-05-27 17:47:48,667 P56424 INFO Train loss: 0.378037
-2022-05-27 17:47:48,667 P56424 INFO ************ Epoch=12 end ************
-2022-05-27 17:47:54,978 P56424 INFO [Metrics] AUC: 0.956843 - logloss: 0.235801
-2022-05-27 17:47:54,979 P56424 INFO Save best model: monitor(max): 0.956843
-2022-05-27 17:47:54,990 P56424 INFO --- 343/343 batches finished ---
-2022-05-27 17:47:55,040 P56424 INFO Train loss: 0.377164
-2022-05-27 17:47:55,041 P56424 INFO ************ Epoch=13 end ************
-2022-05-27 17:48:01,359 P56424 INFO [Metrics] AUC: 0.957092 - logloss: 0.231618
-2022-05-27 17:48:01,361 P56424 INFO Save best model: monitor(max): 0.957092
-2022-05-27 17:48:01,371 P56424 INFO --- 343/343 batches finished ---
-2022-05-27 17:48:01,409 P56424 INFO Train loss: 0.376691
-2022-05-27 17:48:01,409 P56424 INFO ************ Epoch=14 end ************
-2022-05-27 17:48:07,426 P56424 INFO [Metrics] AUC: 0.956775 - logloss: 0.231541
-2022-05-27 17:48:07,426 P56424 INFO Monitor(max) STOP: 0.956775 !
-2022-05-27 17:48:07,426 P56424 INFO Reduce learning rate on plateau: 0.000100
-2022-05-27 17:48:07,427 P56424 INFO --- 343/343 batches finished ---
-2022-05-27 17:48:07,469 P56424 INFO Train loss: 0.376863
-2022-05-27 17:48:07,470 P56424 INFO ************ Epoch=15 end ************
-2022-05-27 17:48:13,584 P56424 INFO [Metrics] AUC: 0.967864 - logloss: 0.206055
-2022-05-27 17:48:13,585 P56424 INFO Save best model: monitor(max): 0.967864
-2022-05-27 17:48:13,596 P56424 INFO --- 343/343 batches finished ---
-2022-05-27 17:48:13,649 P56424 INFO Train loss: 0.273575
-2022-05-27 17:48:13,649 P56424 INFO ************ Epoch=16 end ************
-2022-05-27 17:48:19,686 P56424 INFO [Metrics] AUC: 0.969119 - logloss: 0.214729
-2022-05-27 17:48:19,687 P56424 INFO Save best model: monitor(max): 0.969119
-2022-05-27 17:48:19,695 P56424 INFO --- 343/343 batches finished ---
-2022-05-27 17:48:19,745 P56424 INFO Train loss: 0.185297
-2022-05-27 17:48:19,745 P56424 INFO ************ Epoch=17 end ************
-2022-05-27 17:48:25,743 P56424 INFO [Metrics] AUC: 0.967576 - logloss: 0.240492
-2022-05-27 17:48:25,744 P56424 INFO Monitor(max) STOP: 0.967576 !
-2022-05-27 17:48:25,744 P56424 INFO Reduce learning rate on plateau: 0.000010
-2022-05-27 17:48:25,744 P56424 INFO --- 343/343 batches finished ---
-2022-05-27 17:48:25,795 P56424 INFO Train loss: 0.140979
-2022-05-27 17:48:25,795 P56424 INFO ************ Epoch=18 end ************
-2022-05-27 17:48:31,748 P56424 INFO [Metrics] AUC: 0.967095 - logloss: 0.253392
-2022-05-27 17:48:31,748 P56424 INFO Monitor(max) STOP: 0.967095 !
-2022-05-27 17:48:31,749 P56424 INFO Reduce learning rate on plateau: 0.000001
-2022-05-27 17:48:31,749 P56424 INFO Early stopping at epoch=19
-2022-05-27 17:48:31,749 P56424 INFO --- 343/343 batches finished ---
-2022-05-27 17:48:31,795 P56424 INFO Train loss: 0.109826
-2022-05-27 17:48:31,795 P56424 INFO Training finished.
-2022-05-27 17:48:31,795 P56424 INFO Load best model: /home/XXX/FuxiCTR/benchmarks/Movielens/DLRM_movielenslatest_x1/movielenslatest_x1_cd32d937/DLRM_movielenslatest_x1_002_333e0a39.model
-2022-05-27 17:48:34,455 P56424 INFO ****** Validation evaluation ******
-2022-05-27 17:48:35,720 P56424 INFO [Metrics] AUC: 0.969119 - logloss: 0.214729
-2022-05-27 17:48:35,752 P56424 INFO ******** Test evaluation ********
-2022-05-27 17:48:35,752 P56424 INFO Loading data...
-2022-05-27 17:48:35,753 P56424 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_cd32d937/test.h5
-2022-05-27 17:48:35,756 P56424 INFO Test samples: total/200686, pos/66850, neg/133836, ratio/33.31%, blocks/1
-2022-05-27 17:48:35,757 P56424 INFO Loading test data done.
-2022-05-27 17:48:36,507 P56424 INFO [Metrics] AUC: 0.969071 - logloss: 0.214993
-
-```
+## DLRM_movielenslatest_x1
+
+A hands-on guide to run the DLRM model on the MovielensLatest_x1 dataset.
+
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
+
+### Index
+[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
+
+### Environments
++ Hardware
+
+ ```python
+ CPU: Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.6GHz
+ GPU: Tesla P100 16G
+ RAM: 755G
+
+ ```
+
++ Software
+
+ ```python
+ CUDA: 11.4
+ python: 3.6.5
+ pytorch: 1.0.1.post2
+ pandas: 0.23.0
+ numpy: 1.18.1
+ scipy: 1.1.0
+ sklearn: 0.23.1
+ pyyaml: 5.1
+ h5py: 2.7.1
+ tqdm: 4.59.0
+ fuxictr: 1.2.1
+ ```
+
+### Dataset
+Dataset ID: [MovielensLatest_x1](https://github.com/reczoo/Datasets/tree/main/MovieLens/MovielensLatest_x1). Please refer to the dataset details to get data ready.
+
+### Code
+
+We use [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [DLRM](https://github.com/reczoo/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/DLRM.py).
+
+Running steps:
+
+1. Download [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+
+ ```python
+ sys.path.append('YOUR_PATH_TO_FuxiCTR/')
+ ```
+
+2. Create a data directory and put the downloaded csv files in `../data/Movielens/MovielensLatest_x1`.
+
+3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [DLRM_movielenslatest_x1_tuner_config_04](./DLRM_movielenslatest_x1_tuner_config_04). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
+
+4. Run the following script to start.
+
+ ```bash
+ cd DLRM_movielenslatest_x1
+ nohup python run_expid.py --config ./DLRM_movielenslatest_x1_tuner_config_04 --expid DLRM_movielenslatest_x1_002_333e0a39 --gpu 0 > run.log &
+ tail -f run.log
+ ```
+
+### Results
+
+| AUC | logloss |
+|:--------------------:|:--------------------:|
+| 0.969071 | 0.214993 |
+
+
+### Logs
+```python
+2022-05-27 17:45:47,993 P56424 INFO {
+ "batch_norm": "True",
+ "batch_size": "4096",
+ "bottom_mlp_activations": "ReLU",
+ "bottom_mlp_dropout": "0",
+ "bottom_mlp_units": "None",
+ "data_format": "csv",
+ "data_root": "../data/Movielens/",
+ "dataset_id": "movielenslatest_x1_cd32d937",
+ "debug": "False",
+ "embedding_dim": "10",
+ "embedding_regularizer": "0.01",
+ "epochs": "100",
+ "every_x_epochs": "1",
+ "feature_cols": "[{'active': True, 'dtype': 'float', 'name': ['user_id', 'item_id', 'tag_id'], 'type': 'categorical'}]",
+ "gpu": "1",
+ "interaction_op": "cat",
+ "label_col": "{'dtype': 'float', 'name': 'label'}",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "metrics": "['AUC', 'logloss']",
+ "min_categr_count": "1",
+ "model": "DLRM",
+ "model_id": "DLRM_movielenslatest_x1_002_333e0a39",
+ "model_root": "./Movielens/DLRM_movielenslatest_x1/",
+ "monitor": "AUC",
+ "monitor_mode": "max",
+ "net_regularizer": "0",
+ "num_workers": "3",
+ "optimizer": "adam",
+ "patience": "2",
+ "pickle_feature_encoder": "True",
+ "save_best_only": "True",
+ "seed": "2021",
+ "shuffle": "True",
+ "task": "binary_classification",
+ "test_data": "../data/Movielens/MovielensLatest_x1/test.csv",
+ "top_mlp_activations": "ReLU",
+ "top_mlp_dropout": "0.2",
+ "top_mlp_units": "[400, 400, 400]",
+ "train_data": "../data/Movielens/MovielensLatest_x1/train.csv",
+ "use_hdf5": "True",
+ "valid_data": "../data/Movielens/MovielensLatest_x1/valid.csv",
+ "verbose": "1",
+ "version": "pytorch"
+}
+2022-05-27 17:45:47,994 P56424 INFO Set up feature encoder...
+2022-05-27 17:45:47,995 P56424 INFO Load feature_map from json: ../data/Movielens/movielenslatest_x1_cd32d937/feature_map.json
+2022-05-27 17:45:47,995 P56424 INFO Loading data...
+2022-05-27 17:45:48,004 P56424 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_cd32d937/train.h5
+2022-05-27 17:45:48,040 P56424 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_cd32d937/valid.h5
+2022-05-27 17:45:48,053 P56424 INFO Train samples: total/1404801, pos/467878, neg/936923, ratio/33.31%, blocks/1
+2022-05-27 17:45:48,053 P56424 INFO Validation samples: total/401372, pos/134225, neg/267147, ratio/33.44%, blocks/1
+2022-05-27 17:45:48,053 P56424 INFO Loading train data done.
+2022-05-27 17:45:50,966 P56424 INFO Total number of parameters: 1238391.
+2022-05-27 17:45:50,966 P56424 INFO Start training: 343 batches/epoch
+2022-05-27 17:45:50,966 P56424 INFO ************ Epoch=1 start ************
+2022-05-27 17:46:01,289 P56424 INFO [Metrics] AUC: 0.934995 - logloss: 0.291341
+2022-05-27 17:46:01,289 P56424 INFO Save best model: monitor(max): 0.934995
+2022-05-27 17:46:01,297 P56424 INFO --- 343/343 batches finished ---
+2022-05-27 17:46:01,354 P56424 INFO Train loss: 0.377448
+2022-05-27 17:46:01,354 P56424 INFO ************ Epoch=1 end ************
+2022-05-27 17:46:11,537 P56424 INFO [Metrics] AUC: 0.945625 - logloss: 0.265396
+2022-05-27 17:46:11,538 P56424 INFO Save best model: monitor(max): 0.945625
+2022-05-27 17:46:11,549 P56424 INFO --- 343/343 batches finished ---
+2022-05-27 17:46:11,595 P56424 INFO Train loss: 0.362417
+2022-05-27 17:46:11,595 P56424 INFO ************ Epoch=2 end ************
+2022-05-27 17:46:21,815 P56424 INFO [Metrics] AUC: 0.949287 - logloss: 0.253220
+2022-05-27 17:46:21,815 P56424 INFO Save best model: monitor(max): 0.949287
+2022-05-27 17:46:21,823 P56424 INFO --- 343/343 batches finished ---
+2022-05-27 17:46:21,858 P56424 INFO Train loss: 0.364127
+2022-05-27 17:46:21,858 P56424 INFO ************ Epoch=3 end ************
+2022-05-27 17:46:32,344 P56424 INFO [Metrics] AUC: 0.950830 - logloss: 0.248907
+2022-05-27 17:46:32,345 P56424 INFO Save best model: monitor(max): 0.950830
+2022-05-27 17:46:32,356 P56424 INFO --- 343/343 batches finished ---
+2022-05-27 17:46:32,417 P56424 INFO Train loss: 0.368697
+2022-05-27 17:46:32,417 P56424 INFO ************ Epoch=4 end ************
+2022-05-27 17:46:42,475 P56424 INFO [Metrics] AUC: 0.951979 - logloss: 0.244560
+2022-05-27 17:46:42,475 P56424 INFO Save best model: monitor(max): 0.951979
+2022-05-27 17:46:42,484 P56424 INFO --- 343/343 batches finished ---
+2022-05-27 17:46:42,533 P56424 INFO Train loss: 0.371929
+2022-05-27 17:46:42,533 P56424 INFO ************ Epoch=5 end ************
+2022-05-27 17:46:52,494 P56424 INFO [Metrics] AUC: 0.953028 - logloss: 0.246918
+2022-05-27 17:46:52,495 P56424 INFO Save best model: monitor(max): 0.953028
+2022-05-27 17:46:52,504 P56424 INFO --- 343/343 batches finished ---
+2022-05-27 17:46:52,541 P56424 INFO Train loss: 0.375755
+2022-05-27 17:46:52,541 P56424 INFO ************ Epoch=6 end ************
+2022-05-27 17:47:02,352 P56424 INFO [Metrics] AUC: 0.953333 - logloss: 0.242936
+2022-05-27 17:47:02,353 P56424 INFO Save best model: monitor(max): 0.953333
+2022-05-27 17:47:02,361 P56424 INFO --- 343/343 batches finished ---
+2022-05-27 17:47:02,408 P56424 INFO Train loss: 0.375239
+2022-05-27 17:47:02,408 P56424 INFO ************ Epoch=7 end ************
+2022-05-27 17:47:12,267 P56424 INFO [Metrics] AUC: 0.954451 - logloss: 0.242778
+2022-05-27 17:47:12,268 P56424 INFO Save best model: monitor(max): 0.954451
+2022-05-27 17:47:12,278 P56424 INFO --- 343/343 batches finished ---
+2022-05-27 17:47:12,320 P56424 INFO Train loss: 0.376126
+2022-05-27 17:47:12,320 P56424 INFO ************ Epoch=8 end ************
+2022-05-27 17:47:22,250 P56424 INFO [Metrics] AUC: 0.955334 - logloss: 0.239819
+2022-05-27 17:47:22,251 P56424 INFO Save best model: monitor(max): 0.955334
+2022-05-27 17:47:22,258 P56424 INFO --- 343/343 batches finished ---
+2022-05-27 17:47:22,322 P56424 INFO Train loss: 0.377568
+2022-05-27 17:47:22,322 P56424 INFO ************ Epoch=9 end ************
+2022-05-27 17:47:32,017 P56424 INFO [Metrics] AUC: 0.955787 - logloss: 0.234790
+2022-05-27 17:47:32,018 P56424 INFO Save best model: monitor(max): 0.955787
+2022-05-27 17:47:32,025 P56424 INFO --- 343/343 batches finished ---
+2022-05-27 17:47:32,067 P56424 INFO Train loss: 0.376616
+2022-05-27 17:47:32,067 P56424 INFO ************ Epoch=10 end ************
+2022-05-27 17:47:42,021 P56424 INFO [Metrics] AUC: 0.956218 - logloss: 0.235922
+2022-05-27 17:47:42,022 P56424 INFO Save best model: monitor(max): 0.956218
+2022-05-27 17:47:42,033 P56424 INFO --- 343/343 batches finished ---
+2022-05-27 17:47:42,082 P56424 INFO Train loss: 0.376828
+2022-05-27 17:47:42,082 P56424 INFO ************ Epoch=11 end ************
+2022-05-27 17:47:48,606 P56424 INFO [Metrics] AUC: 0.956553 - logloss: 0.236383
+2022-05-27 17:47:48,607 P56424 INFO Save best model: monitor(max): 0.956553
+2022-05-27 17:47:48,615 P56424 INFO --- 343/343 batches finished ---
+2022-05-27 17:47:48,667 P56424 INFO Train loss: 0.378037
+2022-05-27 17:47:48,667 P56424 INFO ************ Epoch=12 end ************
+2022-05-27 17:47:54,978 P56424 INFO [Metrics] AUC: 0.956843 - logloss: 0.235801
+2022-05-27 17:47:54,979 P56424 INFO Save best model: monitor(max): 0.956843
+2022-05-27 17:47:54,990 P56424 INFO --- 343/343 batches finished ---
+2022-05-27 17:47:55,040 P56424 INFO Train loss: 0.377164
+2022-05-27 17:47:55,041 P56424 INFO ************ Epoch=13 end ************
+2022-05-27 17:48:01,359 P56424 INFO [Metrics] AUC: 0.957092 - logloss: 0.231618
+2022-05-27 17:48:01,361 P56424 INFO Save best model: monitor(max): 0.957092
+2022-05-27 17:48:01,371 P56424 INFO --- 343/343 batches finished ---
+2022-05-27 17:48:01,409 P56424 INFO Train loss: 0.376691
+2022-05-27 17:48:01,409 P56424 INFO ************ Epoch=14 end ************
+2022-05-27 17:48:07,426 P56424 INFO [Metrics] AUC: 0.956775 - logloss: 0.231541
+2022-05-27 17:48:07,426 P56424 INFO Monitor(max) STOP: 0.956775 !
+2022-05-27 17:48:07,426 P56424 INFO Reduce learning rate on plateau: 0.000100
+2022-05-27 17:48:07,427 P56424 INFO --- 343/343 batches finished ---
+2022-05-27 17:48:07,469 P56424 INFO Train loss: 0.376863
+2022-05-27 17:48:07,470 P56424 INFO ************ Epoch=15 end ************
+2022-05-27 17:48:13,584 P56424 INFO [Metrics] AUC: 0.967864 - logloss: 0.206055
+2022-05-27 17:48:13,585 P56424 INFO Save best model: monitor(max): 0.967864
+2022-05-27 17:48:13,596 P56424 INFO --- 343/343 batches finished ---
+2022-05-27 17:48:13,649 P56424 INFO Train loss: 0.273575
+2022-05-27 17:48:13,649 P56424 INFO ************ Epoch=16 end ************
+2022-05-27 17:48:19,686 P56424 INFO [Metrics] AUC: 0.969119 - logloss: 0.214729
+2022-05-27 17:48:19,687 P56424 INFO Save best model: monitor(max): 0.969119
+2022-05-27 17:48:19,695 P56424 INFO --- 343/343 batches finished ---
+2022-05-27 17:48:19,745 P56424 INFO Train loss: 0.185297
+2022-05-27 17:48:19,745 P56424 INFO ************ Epoch=17 end ************
+2022-05-27 17:48:25,743 P56424 INFO [Metrics] AUC: 0.967576 - logloss: 0.240492
+2022-05-27 17:48:25,744 P56424 INFO Monitor(max) STOP: 0.967576 !
+2022-05-27 17:48:25,744 P56424 INFO Reduce learning rate on plateau: 0.000010
+2022-05-27 17:48:25,744 P56424 INFO --- 343/343 batches finished ---
+2022-05-27 17:48:25,795 P56424 INFO Train loss: 0.140979
+2022-05-27 17:48:25,795 P56424 INFO ************ Epoch=18 end ************
+2022-05-27 17:48:31,748 P56424 INFO [Metrics] AUC: 0.967095 - logloss: 0.253392
+2022-05-27 17:48:31,748 P56424 INFO Monitor(max) STOP: 0.967095 !
+2022-05-27 17:48:31,749 P56424 INFO Reduce learning rate on plateau: 0.000001
+2022-05-27 17:48:31,749 P56424 INFO Early stopping at epoch=19
+2022-05-27 17:48:31,749 P56424 INFO --- 343/343 batches finished ---
+2022-05-27 17:48:31,795 P56424 INFO Train loss: 0.109826
+2022-05-27 17:48:31,795 P56424 INFO Training finished.
+2022-05-27 17:48:31,795 P56424 INFO Load best model: /home/XXX/FuxiCTR/benchmarks/Movielens/DLRM_movielenslatest_x1/movielenslatest_x1_cd32d937/DLRM_movielenslatest_x1_002_333e0a39.model
+2022-05-27 17:48:34,455 P56424 INFO ****** Validation evaluation ******
+2022-05-27 17:48:35,720 P56424 INFO [Metrics] AUC: 0.969119 - logloss: 0.214729
+2022-05-27 17:48:35,752 P56424 INFO ******** Test evaluation ********
+2022-05-27 17:48:35,752 P56424 INFO Loading data...
+2022-05-27 17:48:35,753 P56424 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_cd32d937/test.h5
+2022-05-27 17:48:35,756 P56424 INFO Test samples: total/200686, pos/66850, neg/133836, ratio/33.31%, blocks/1
+2022-05-27 17:48:35,757 P56424 INFO Loading test data done.
+2022-05-27 17:48:36,507 P56424 INFO [Metrics] AUC: 0.969071 - logloss: 0.214993
+
+```
diff --git a/ranking/ctr/DNN/DNN_amazonelectronics_x1/README.md b/ranking/ctr/DNN/DNN_amazonelectronics_x1/README.md
index 8e494965..fbb3b86a 100644
--- a/ranking/ctr/DNN/DNN_amazonelectronics_x1/README.md
+++ b/ranking/ctr/DNN/DNN_amazonelectronics_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DNN model on the AmazonElectronics_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [AmazonElectronics_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/Amazon#AmazonElectronics_x1) to get data ready.
+Please refer to [AmazonElectronics_x1](https://github.com/reczoo/Datasets/tree/main/Amazon/AmazonElectronics_x1) to get the dataset details.
### Code
-We use the [DNN](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/DNN) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [DNN](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/DNN) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/DNN/DNN_avazu_x1/README.md b/ranking/ctr/DNN/DNN_avazu_x1/README.md
index 452b5bec..aae3c8a2 100644
--- a/ranking/ctr/DNN/DNN_avazu_x1/README.md
+++ b/ranking/ctr/DNN/DNN_avazu_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DNN model on the Avazu_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
@@ -36,15 +36,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
### Dataset
-Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x1](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DNN](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DNN.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DNN](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DNN.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DNN/DNN_avazu_x4_001/README.md b/ranking/ctr/DNN/DNN_avazu_x4_001/README.md
index 56a7eb50..f2f2ad44 100644
--- a/ranking/ctr/DNN/DNN_avazu_x4_001/README.md
+++ b/ranking/ctr/DNN/DNN_avazu_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DNN model on the Avazu_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DNN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DNN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DNN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DNN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DNN/DNN_avazu_x4_002/README.md b/ranking/ctr/DNN/DNN_avazu_x4_002/README.md
index f651bf8c..d78f5354 100644
--- a/ranking/ctr/DNN/DNN_avazu_x4_002/README.md
+++ b/ranking/ctr/DNN/DNN_avazu_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DNN model on the Avazu_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DNN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DNN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DNN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DNN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DNN/DNN_criteo_x1/README.md b/ranking/ctr/DNN/DNN_criteo_x1/README.md
index 8dfb0615..d94fec3e 100644
--- a/ranking/ctr/DNN/DNN_criteo_x1/README.md
+++ b/ranking/ctr/DNN/DNN_criteo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DNN model on the Criteo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
@@ -36,15 +36,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
### Dataset
-Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x1](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DNN](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DNN.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DNN](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DNN.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DNN/DNN_criteo_x4_001/README.md b/ranking/ctr/DNN/DNN_criteo_x4_001/README.md
index 960b416e..341f65c6 100644
--- a/ranking/ctr/DNN/DNN_criteo_x4_001/README.md
+++ b/ranking/ctr/DNN/DNN_criteo_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DNN model on the Criteo_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DNN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DNN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DNN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DNN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DNN/DNN_criteo_x4_002/README.md b/ranking/ctr/DNN/DNN_criteo_x4_002/README.md
index 80ca158e..5ce0c284 100644
--- a/ranking/ctr/DNN/DNN_criteo_x4_002/README.md
+++ b/ranking/ctr/DNN/DNN_criteo_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DNN model on the Criteo_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DNN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DNN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DNN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DNN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DNN/DNN_frappe_x1/README.md b/ranking/ctr/DNN/DNN_frappe_x1/README.md
index 5121c719..02675b82 100644
--- a/ranking/ctr/DNN/DNN_frappe_x1/README.md
+++ b/ranking/ctr/DNN/DNN_frappe_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DNN model on the Frappe_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Frappe_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Frappe/README.md#Frappe_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Frappe_x1](https://github.com/reczoo/Datasets/tree/main/Frappe/Frappe_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DNN](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DNN.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DNN](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DNN.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DNN/DNN_kkbox_x1/README.md b/ranking/ctr/DNN/DNN_kkbox_x1/README.md
index b77c1406..5f05090d 100644
--- a/ranking/ctr/DNN/DNN_kkbox_x1/README.md
+++ b/ranking/ctr/DNN/DNN_kkbox_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DNN model on the KKBox_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -38,11 +38,11 @@ Dataset ID: [KKBox_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pre
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DNN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DNN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DNN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DNN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DNN/DNN_kuaivideo_x1/README.md b/ranking/ctr/DNN/DNN_kuaivideo_x1/README.md
index 074f0b8b..7fc349d7 100644
--- a/ranking/ctr/DNN/DNN_kuaivideo_x1/README.md
+++ b/ranking/ctr/DNN/DNN_kuaivideo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DNN model on the KuaiVideo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [KuaiVideo_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/KuaiShou#KuaiVideo_x1) to get data ready.
+Please refer to [KuaiVideo_x1](https://github.com/reczoo/Datasets/tree/main/KuaiShou/KuaiVideo_x1) to get the dataset details.
### Code
-We use the [DNN](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/DNN) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [DNN](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/DNN) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/DNN/DNN_microvideo1.7m_x1/README.md b/ranking/ctr/DNN/DNN_microvideo1.7m_x1/README.md
index 10a00844..5c7608ea 100644
--- a/ranking/ctr/DNN/DNN_microvideo1.7m_x1/README.md
+++ b/ranking/ctr/DNN/DNN_microvideo1.7m_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DNN model on the MicroVideo1.7M_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [MicroVideo1.7M_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/MicroVideo1.7M#MicroVideo17M_x1) to get data ready.
+Please refer to [MicroVideo1.7M_x1](https://github.com/reczoo/Datasets/tree/main/MicroVideo/MicroVideo1.7M_x1) to get the dataset details.
### Code
-We use the [DNN](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/DNN) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [DNN](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/DNN) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/DNN/DNN_movielenslatest_x1/README.md b/ranking/ctr/DNN/DNN_movielenslatest_x1/README.md
index 9efce67e..bdc25681 100644
--- a/ranking/ctr/DNN/DNN_movielenslatest_x1/README.md
+++ b/ranking/ctr/DNN/DNN_movielenslatest_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DNN model on the Movielenslatest_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Movielenslatest_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/MovieLens/README.md#Movielenslatest_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [MovielensLatest_x1](https://github.com/reczoo/Datasets/tree/main/MovieLens/MovielensLatest_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DNN](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DNN.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DNN](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DNN.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DNN/DNN_taobaoad_x1/README.md b/ranking/ctr/DNN/DNN_taobaoad_x1/README.md
index ff86d141..a5fca080 100644
--- a/ranking/ctr/DNN/DNN_taobaoad_x1/README.md
+++ b/ranking/ctr/DNN/DNN_taobaoad_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DNN model on the TaobaoAd_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [TaobaoAd_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/Taobao#TaobaoAd_x1) to get data ready.
+Please refer to [TaobaoAd_x1](https://github.com/reczoo/Datasets/tree/main/Taobao/TaobaoAd_x1) to get the dataset details.
### Code
-We use the [DNN](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/DNN) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [DNN](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/DNN) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/DSSM/DSSM_frappe_x1/README.md b/ranking/ctr/DSSM/DSSM_frappe_x1/README.md
index e78be54a..aca225b2 100644
--- a/ranking/ctr/DSSM/DSSM_frappe_x1/README.md
+++ b/ranking/ctr/DSSM/DSSM_frappe_x1/README.md
@@ -1,258 +1,258 @@
-## DSSM_frappe_x1
-
-A hands-on guide to run the DSSM model on the Frappe_x1 dataset.
-
-Author: [XUEPAI](https://github.com/xue-pai)
-
-### Index
-[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
-
-### Environments
-+ Hardware
-
- ```python
- CPU: Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.6GHz
- GPU: Tesla P100 16G
- RAM: 755G
-
- ```
-
-+ Software
-
- ```python
- CUDA: 11.4
- python: 3.6.5
- pytorch: 1.0.1.post2
- pandas: 0.23.0
- numpy: 1.18.1
- scipy: 1.1.0
- sklearn: 0.23.1
- pyyaml: 5.1
- h5py: 2.7.1
- tqdm: 4.59.0
- fuxictr: 1.2.1
- ```
-
-### Dataset
-Dataset ID: [Frappe_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Frappe#Frappe_x1). Please refer to the dataset details to get data ready.
-
-### Code
-
-We use [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [DSSM](https://github.com/xue-pai/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/DSSM.py).
-
-Running steps:
-
-1. Download [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
-
- ```python
- sys.path.append('YOUR_PATH_TO_FuxiCTR/')
- ```
-
-2. Create a data directory and put the downloaded csv files in `../data/Frappe/Frappe_x1`.
-
-3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [DSSM_frappe_x1_tuner_config_02](./DSSM_frappe_x1_tuner_config_02). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
-
-4. Run the following script to start.
-
- ```bash
- cd DSSM_frappe_x1
- nohup python run_expid.py --config ./DSSM_frappe_x1_tuner_config_02 --expid DSSM_frappe_x1_006_4be388eb --gpu 0 > run.log &
- tail -f run.log
- ```
-
-### Results
-
-| AUC | logloss |
-|:--------------------:|:--------------------:|
-| 0.983221 | 0.187438 |
-
-
-### Logs
-```python
-2022-04-13 11:55:40,527 P31863 INFO {
- "batch_norm": "True",
- "batch_size": "4096",
- "data_format": "csv",
- "data_root": "../data/Frappe/",
- "dataset_id": "frappe_x1_db5a3f58",
- "debug": "False",
- "embedding_dim": "10",
- "embedding_regularizer": "0.1",
- "epochs": "100",
- "every_x_epochs": "1",
- "feature_cols": "[{'active': True, 'dtype': 'float', 'name': ['user', 'daytime', 'weekday', 'isweekend', 'homework', 'weather', 'country', 'city'], 'source': 'user', 'type': 'categorical'}, {'active': True, 'dtype': 'float', 'name': ['item', 'cost'], 'source': 'item', 'type': 'categorical'}]",
- "gpu": "1",
- "item_tower_activations": "ReLU",
- "item_tower_dropout": "0.2",
- "item_tower_units": "[400, 400, 400]",
- "label_col": "{'dtype': 'float', 'name': 'label'}",
- "learning_rate": "0.001",
- "loss": "binary_crossentropy",
- "metrics": "['AUC', 'logloss']",
- "min_categr_count": "1",
- "model": "DSSM",
- "model_id": "DSSM_frappe_x1_006_4be388eb",
- "model_root": "./Frappe/DSSM_frappe_x1/",
- "monitor": "AUC",
- "monitor_mode": "max",
- "net_regularizer": "0",
- "num_workers": "3",
- "optimizer": "adam",
- "patience": "2",
- "pickle_feature_encoder": "True",
- "save_best_only": "True",
- "seed": "2021",
- "shuffle": "True",
- "task": "binary_classification",
- "test_data": "../data/Frappe/Frappe_x1/test.csv",
- "train_data": "../data/Frappe/Frappe_x1/train.csv",
- "use_hdf5": "True",
- "user_tower_activations": "ReLU",
- "user_tower_dropout": "0.1",
- "user_tower_units": "[400, 400, 400]",
- "valid_data": "../data/Frappe/Frappe_x1/valid.csv",
- "verbose": "1",
- "version": "pytorch"
-}
-2022-04-13 11:55:40,528 P31863 INFO Set up feature encoder...
-2022-04-13 11:55:40,528 P31863 INFO Load feature_map from json: ../data/Frappe/frappe_x1_db5a3f58/feature_map.json
-2022-04-13 11:55:40,528 P31863 INFO Loading data...
-2022-04-13 11:55:40,531 P31863 INFO Loading data from h5: ../data/Frappe/frappe_x1_db5a3f58/train.h5
-2022-04-13 11:55:40,542 P31863 INFO Loading data from h5: ../data/Frappe/frappe_x1_db5a3f58/valid.h5
-2022-04-13 11:55:40,546 P31863 INFO Train samples: total/202027, pos/67604, neg/134423, ratio/33.46%, blocks/1
-2022-04-13 11:55:40,546 P31863 INFO Validation samples: total/57722, pos/19063, neg/38659, ratio/33.03%, blocks/1
-2022-04-13 11:55:40,546 P31863 INFO Loading train data done.
-2022-04-13 11:55:44,684 P31863 INFO Total number of parameters: 739490.
-2022-04-13 11:55:44,685 P31863 INFO Start training: 50 batches/epoch
-2022-04-13 11:55:44,685 P31863 INFO ************ Epoch=1 start ************
-2022-04-13 11:55:50,880 P31863 INFO [Metrics] AUC: 0.900741 - logloss: 0.611823
-2022-04-13 11:55:50,881 P31863 INFO Save best model: monitor(max): 0.900741
-2022-04-13 11:55:50,886 P31863 INFO --- 50/50 batches finished ---
-2022-04-13 11:55:50,929 P31863 INFO Train loss: 0.650903
-2022-04-13 11:55:50,929 P31863 INFO ************ Epoch=1 end ************
-2022-04-13 11:55:56,885 P31863 INFO [Metrics] AUC: 0.954946 - logloss: 0.279399
-2022-04-13 11:55:56,886 P31863 INFO Save best model: monitor(max): 0.954946
-2022-04-13 11:55:56,892 P31863 INFO --- 50/50 batches finished ---
-2022-04-13 11:55:56,927 P31863 INFO Train loss: 0.331125
-2022-04-13 11:55:56,927 P31863 INFO ************ Epoch=2 end ************
-2022-04-13 11:56:02,535 P31863 INFO [Metrics] AUC: 0.967419 - logloss: 0.216603
-2022-04-13 11:56:02,535 P31863 INFO Save best model: monitor(max): 0.967419
-2022-04-13 11:56:02,542 P31863 INFO --- 50/50 batches finished ---
-2022-04-13 11:56:02,583 P31863 INFO Train loss: 0.267234
-2022-04-13 11:56:02,584 P31863 INFO ************ Epoch=3 end ************
-2022-04-13 11:56:07,801 P31863 INFO [Metrics] AUC: 0.972068 - logloss: 0.210400
-2022-04-13 11:56:07,801 P31863 INFO Save best model: monitor(max): 0.972068
-2022-04-13 11:56:07,810 P31863 INFO --- 50/50 batches finished ---
-2022-04-13 11:56:07,852 P31863 INFO Train loss: 0.241815
-2022-04-13 11:56:07,852 P31863 INFO ************ Epoch=4 end ************
-2022-04-13 11:56:13,194 P31863 INFO [Metrics] AUC: 0.973752 - logloss: 0.220257
-2022-04-13 11:56:13,195 P31863 INFO Save best model: monitor(max): 0.973752
-2022-04-13 11:56:13,201 P31863 INFO --- 50/50 batches finished ---
-2022-04-13 11:56:13,242 P31863 INFO Train loss: 0.227181
-2022-04-13 11:56:13,243 P31863 INFO ************ Epoch=5 end ************
-2022-04-13 11:56:18,483 P31863 INFO [Metrics] AUC: 0.974975 - logloss: 0.189582
-2022-04-13 11:56:18,484 P31863 INFO Save best model: monitor(max): 0.974975
-2022-04-13 11:56:18,490 P31863 INFO --- 50/50 batches finished ---
-2022-04-13 11:56:18,535 P31863 INFO Train loss: 0.219095
-2022-04-13 11:56:18,535 P31863 INFO ************ Epoch=6 end ************
-2022-04-13 11:56:23,707 P31863 INFO [Metrics] AUC: 0.976341 - logloss: 0.204962
-2022-04-13 11:56:23,709 P31863 INFO Save best model: monitor(max): 0.976341
-2022-04-13 11:56:23,718 P31863 INFO --- 50/50 batches finished ---
-2022-04-13 11:56:23,761 P31863 INFO Train loss: 0.212992
-2022-04-13 11:56:23,761 P31863 INFO ************ Epoch=7 end ************
-2022-04-13 11:56:28,970 P31863 INFO [Metrics] AUC: 0.976750 - logloss: 0.180353
-2022-04-13 11:56:28,971 P31863 INFO Save best model: monitor(max): 0.976750
-2022-04-13 11:56:28,979 P31863 INFO --- 50/50 batches finished ---
-2022-04-13 11:56:29,016 P31863 INFO Train loss: 0.210784
-2022-04-13 11:56:29,016 P31863 INFO ************ Epoch=8 end ************
-2022-04-13 11:56:34,938 P31863 INFO [Metrics] AUC: 0.977987 - logloss: 0.182927
-2022-04-13 11:56:34,939 P31863 INFO Save best model: monitor(max): 0.977987
-2022-04-13 11:56:34,946 P31863 INFO --- 50/50 batches finished ---
-2022-04-13 11:56:34,992 P31863 INFO Train loss: 0.204913
-2022-04-13 11:56:34,992 P31863 INFO ************ Epoch=9 end ************
-2022-04-13 11:56:41,175 P31863 INFO [Metrics] AUC: 0.976822 - logloss: 0.199739
-2022-04-13 11:56:41,176 P31863 INFO Monitor(max) STOP: 0.976822 !
-2022-04-13 11:56:41,176 P31863 INFO Reduce learning rate on plateau: 0.000100
-2022-04-13 11:56:41,176 P31863 INFO --- 50/50 batches finished ---
-2022-04-13 11:56:41,221 P31863 INFO Train loss: 0.205779
-2022-04-13 11:56:41,221 P31863 INFO ************ Epoch=10 end ************
-2022-04-13 11:56:47,388 P31863 INFO [Metrics] AUC: 0.982380 - logloss: 0.158879
-2022-04-13 11:56:47,389 P31863 INFO Save best model: monitor(max): 0.982380
-2022-04-13 11:56:47,408 P31863 INFO --- 50/50 batches finished ---
-2022-04-13 11:56:47,469 P31863 INFO Train loss: 0.173535
-2022-04-13 11:56:47,469 P31863 INFO ************ Epoch=11 end ************
-2022-04-13 11:56:53,624 P31863 INFO [Metrics] AUC: 0.983463 - logloss: 0.158115
-2022-04-13 11:56:53,625 P31863 INFO Save best model: monitor(max): 0.983463
-2022-04-13 11:56:53,634 P31863 INFO --- 50/50 batches finished ---
-2022-04-13 11:56:53,681 P31863 INFO Train loss: 0.144106
-2022-04-13 11:56:53,681 P31863 INFO ************ Epoch=12 end ************
-2022-04-13 11:56:59,380 P31863 INFO [Metrics] AUC: 0.984005 - logloss: 0.159751
-2022-04-13 11:56:59,381 P31863 INFO Save best model: monitor(max): 0.984005
-2022-04-13 11:56:59,387 P31863 INFO --- 50/50 batches finished ---
-2022-04-13 11:56:59,438 P31863 INFO Train loss: 0.126666
-2022-04-13 11:56:59,438 P31863 INFO ************ Epoch=13 end ************
-2022-04-13 11:57:04,229 P31863 INFO [Metrics] AUC: 0.984054 - logloss: 0.165974
-2022-04-13 11:57:04,230 P31863 INFO Save best model: monitor(max): 0.984054
-2022-04-13 11:57:04,236 P31863 INFO --- 50/50 batches finished ---
-2022-04-13 11:57:04,274 P31863 INFO Train loss: 0.113567
-2022-04-13 11:57:04,274 P31863 INFO ************ Epoch=14 end ************
-2022-04-13 11:57:08,970 P31863 INFO [Metrics] AUC: 0.984203 - logloss: 0.169526
-2022-04-13 11:57:08,970 P31863 INFO Save best model: monitor(max): 0.984203
-2022-04-13 11:57:08,976 P31863 INFO --- 50/50 batches finished ---
-2022-04-13 11:57:09,015 P31863 INFO Train loss: 0.102512
-2022-04-13 11:57:09,015 P31863 INFO ************ Epoch=15 end ************
-2022-04-13 11:57:14,990 P31863 INFO [Metrics] AUC: 0.984106 - logloss: 0.174788
-2022-04-13 11:57:14,990 P31863 INFO Monitor(max) STOP: 0.984106 !
-2022-04-13 11:57:14,990 P31863 INFO Reduce learning rate on plateau: 0.000010
-2022-04-13 11:57:14,991 P31863 INFO --- 50/50 batches finished ---
-2022-04-13 11:57:15,030 P31863 INFO Train loss: 0.095254
-2022-04-13 11:57:15,030 P31863 INFO ************ Epoch=16 end ************
-2022-04-13 11:57:19,600 P31863 INFO [Metrics] AUC: 0.984262 - logloss: 0.175753
-2022-04-13 11:57:19,601 P31863 INFO Save best model: monitor(max): 0.984262
-2022-04-13 11:57:19,610 P31863 INFO --- 50/50 batches finished ---
-2022-04-13 11:57:19,646 P31863 INFO Train loss: 0.087010
-2022-04-13 11:57:19,646 P31863 INFO ************ Epoch=17 end ************
-2022-04-13 11:57:24,089 P31863 INFO [Metrics] AUC: 0.984265 - logloss: 0.177004
-2022-04-13 11:57:24,090 P31863 INFO Save best model: monitor(max): 0.984265
-2022-04-13 11:57:24,099 P31863 INFO --- 50/50 batches finished ---
-2022-04-13 11:57:24,141 P31863 INFO Train loss: 0.084949
-2022-04-13 11:57:24,141 P31863 INFO ************ Epoch=18 end ************
-2022-04-13 11:57:28,611 P31863 INFO [Metrics] AUC: 0.984269 - logloss: 0.176973
-2022-04-13 11:57:28,612 P31863 INFO Save best model: monitor(max): 0.984269
-2022-04-13 11:57:28,620 P31863 INFO --- 50/50 batches finished ---
-2022-04-13 11:57:28,666 P31863 INFO Train loss: 0.084518
-2022-04-13 11:57:28,666 P31863 INFO ************ Epoch=19 end ************
-2022-04-13 11:57:34,168 P31863 INFO [Metrics] AUC: 0.984280 - logloss: 0.178141
-2022-04-13 11:57:34,168 P31863 INFO Save best model: monitor(max): 0.984280
-2022-04-13 11:57:34,174 P31863 INFO --- 50/50 batches finished ---
-2022-04-13 11:57:34,225 P31863 INFO Train loss: 0.083657
-2022-04-13 11:57:34,225 P31863 INFO ************ Epoch=20 end ************
-2022-04-13 11:57:39,887 P31863 INFO [Metrics] AUC: 0.984285 - logloss: 0.179113
-2022-04-13 11:57:39,888 P31863 INFO Save best model: monitor(max): 0.984285
-2022-04-13 11:57:39,896 P31863 INFO --- 50/50 batches finished ---
-2022-04-13 11:57:39,940 P31863 INFO Train loss: 0.082723
-2022-04-13 11:57:39,940 P31863 INFO ************ Epoch=21 end ************
-2022-04-13 11:57:45,701 P31863 INFO [Metrics] AUC: 0.984264 - logloss: 0.179959
-2022-04-13 11:57:45,702 P31863 INFO Monitor(max) STOP: 0.984264 !
-2022-04-13 11:57:45,702 P31863 INFO Reduce learning rate on plateau: 0.000001
-2022-04-13 11:57:45,702 P31863 INFO --- 50/50 batches finished ---
-2022-04-13 11:57:45,746 P31863 INFO Train loss: 0.081612
-2022-04-13 11:57:45,746 P31863 INFO ************ Epoch=22 end ************
-2022-04-13 11:57:51,619 P31863 INFO [Metrics] AUC: 0.984255 - logloss: 0.180367
-2022-04-13 11:57:51,620 P31863 INFO Monitor(max) STOP: 0.984255 !
-2022-04-13 11:57:51,620 P31863 INFO Reduce learning rate on plateau: 0.000001
-2022-04-13 11:57:51,621 P31863 INFO Early stopping at epoch=23
-2022-04-13 11:57:51,621 P31863 INFO --- 50/50 batches finished ---
-2022-04-13 11:57:51,662 P31863 INFO Train loss: 0.080517
-2022-04-13 11:57:51,662 P31863 INFO Training finished.
-2022-04-13 11:57:51,662 P31863 INFO Load best model: /home/XXX/FuxiCTR/benchmarks/Frappe/DSSM_frappe_x1/frappe_x1_db5a3f58/DSSM_frappe_x1_006_4be388eb.model
-2022-04-13 11:57:55,845 P31863 INFO ****** Validation evaluation ******
-2022-04-13 11:57:56,255 P31863 INFO [Metrics] AUC: 0.984285 - logloss: 0.179113
-2022-04-13 11:57:56,314 P31863 INFO ******** Test evaluation ********
-2022-04-13 11:57:56,314 P31863 INFO Loading data...
-2022-04-13 11:57:56,315 P31863 INFO Loading data from h5: ../data/Frappe/frappe_x1_db5a3f58/test.h5
-2022-04-13 11:57:56,318 P31863 INFO Test samples: total/28860, pos/9536, neg/19324, ratio/33.04%, blocks/1
-2022-04-13 11:57:56,319 P31863 INFO Loading test data done.
-2022-04-13 11:57:56,617 P31863 INFO [Metrics] AUC: 0.983221 - logloss: 0.187438
-
-```
+## DSSM_frappe_x1
+
+A hands-on guide to run the DSSM model on the Frappe_x1 dataset.
+
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
+
+### Index
+[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
+
+### Environments
++ Hardware
+
+ ```python
+ CPU: Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.6GHz
+ GPU: Tesla P100 16G
+ RAM: 755G
+
+ ```
+
++ Software
+
+ ```python
+ CUDA: 11.4
+ python: 3.6.5
+ pytorch: 1.0.1.post2
+ pandas: 0.23.0
+ numpy: 1.18.1
+ scipy: 1.1.0
+ sklearn: 0.23.1
+ pyyaml: 5.1
+ h5py: 2.7.1
+ tqdm: 4.59.0
+ fuxictr: 1.2.1
+ ```
+
+### Dataset
+Dataset ID: [Frappe_x1](https://github.com/reczoo/Datasets/tree/main/Frappe/Frappe_x1). Please refer to the dataset details to get data ready.
+
+### Code
+
+We use [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [DSSM](https://github.com/reczoo/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/DSSM.py).
+
+Running steps:
+
+1. Download [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+
+ ```python
+ sys.path.append('YOUR_PATH_TO_FuxiCTR/')
+ ```
+
+2. Create a data directory and put the downloaded csv files in `../data/Frappe/Frappe_x1`.
+
+3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [DSSM_frappe_x1_tuner_config_02](./DSSM_frappe_x1_tuner_config_02). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
+
+4. Run the following script to start.
+
+ ```bash
+ cd DSSM_frappe_x1
+ nohup python run_expid.py --config ./DSSM_frappe_x1_tuner_config_02 --expid DSSM_frappe_x1_006_4be388eb --gpu 0 > run.log &
+ tail -f run.log
+ ```
+
+### Results
+
+| AUC | logloss |
+|:--------------------:|:--------------------:|
+| 0.983221 | 0.187438 |
+
+
+### Logs
+```python
+2022-04-13 11:55:40,527 P31863 INFO {
+ "batch_norm": "True",
+ "batch_size": "4096",
+ "data_format": "csv",
+ "data_root": "../data/Frappe/",
+ "dataset_id": "frappe_x1_db5a3f58",
+ "debug": "False",
+ "embedding_dim": "10",
+ "embedding_regularizer": "0.1",
+ "epochs": "100",
+ "every_x_epochs": "1",
+ "feature_cols": "[{'active': True, 'dtype': 'float', 'name': ['user', 'daytime', 'weekday', 'isweekend', 'homework', 'weather', 'country', 'city'], 'source': 'user', 'type': 'categorical'}, {'active': True, 'dtype': 'float', 'name': ['item', 'cost'], 'source': 'item', 'type': 'categorical'}]",
+ "gpu": "1",
+ "item_tower_activations": "ReLU",
+ "item_tower_dropout": "0.2",
+ "item_tower_units": "[400, 400, 400]",
+ "label_col": "{'dtype': 'float', 'name': 'label'}",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "metrics": "['AUC', 'logloss']",
+ "min_categr_count": "1",
+ "model": "DSSM",
+ "model_id": "DSSM_frappe_x1_006_4be388eb",
+ "model_root": "./Frappe/DSSM_frappe_x1/",
+ "monitor": "AUC",
+ "monitor_mode": "max",
+ "net_regularizer": "0",
+ "num_workers": "3",
+ "optimizer": "adam",
+ "patience": "2",
+ "pickle_feature_encoder": "True",
+ "save_best_only": "True",
+ "seed": "2021",
+ "shuffle": "True",
+ "task": "binary_classification",
+ "test_data": "../data/Frappe/Frappe_x1/test.csv",
+ "train_data": "../data/Frappe/Frappe_x1/train.csv",
+ "use_hdf5": "True",
+ "user_tower_activations": "ReLU",
+ "user_tower_dropout": "0.1",
+ "user_tower_units": "[400, 400, 400]",
+ "valid_data": "../data/Frappe/Frappe_x1/valid.csv",
+ "verbose": "1",
+ "version": "pytorch"
+}
+2022-04-13 11:55:40,528 P31863 INFO Set up feature encoder...
+2022-04-13 11:55:40,528 P31863 INFO Load feature_map from json: ../data/Frappe/frappe_x1_db5a3f58/feature_map.json
+2022-04-13 11:55:40,528 P31863 INFO Loading data...
+2022-04-13 11:55:40,531 P31863 INFO Loading data from h5: ../data/Frappe/frappe_x1_db5a3f58/train.h5
+2022-04-13 11:55:40,542 P31863 INFO Loading data from h5: ../data/Frappe/frappe_x1_db5a3f58/valid.h5
+2022-04-13 11:55:40,546 P31863 INFO Train samples: total/202027, pos/67604, neg/134423, ratio/33.46%, blocks/1
+2022-04-13 11:55:40,546 P31863 INFO Validation samples: total/57722, pos/19063, neg/38659, ratio/33.03%, blocks/1
+2022-04-13 11:55:40,546 P31863 INFO Loading train data done.
+2022-04-13 11:55:44,684 P31863 INFO Total number of parameters: 739490.
+2022-04-13 11:55:44,685 P31863 INFO Start training: 50 batches/epoch
+2022-04-13 11:55:44,685 P31863 INFO ************ Epoch=1 start ************
+2022-04-13 11:55:50,880 P31863 INFO [Metrics] AUC: 0.900741 - logloss: 0.611823
+2022-04-13 11:55:50,881 P31863 INFO Save best model: monitor(max): 0.900741
+2022-04-13 11:55:50,886 P31863 INFO --- 50/50 batches finished ---
+2022-04-13 11:55:50,929 P31863 INFO Train loss: 0.650903
+2022-04-13 11:55:50,929 P31863 INFO ************ Epoch=1 end ************
+2022-04-13 11:55:56,885 P31863 INFO [Metrics] AUC: 0.954946 - logloss: 0.279399
+2022-04-13 11:55:56,886 P31863 INFO Save best model: monitor(max): 0.954946
+2022-04-13 11:55:56,892 P31863 INFO --- 50/50 batches finished ---
+2022-04-13 11:55:56,927 P31863 INFO Train loss: 0.331125
+2022-04-13 11:55:56,927 P31863 INFO ************ Epoch=2 end ************
+2022-04-13 11:56:02,535 P31863 INFO [Metrics] AUC: 0.967419 - logloss: 0.216603
+2022-04-13 11:56:02,535 P31863 INFO Save best model: monitor(max): 0.967419
+2022-04-13 11:56:02,542 P31863 INFO --- 50/50 batches finished ---
+2022-04-13 11:56:02,583 P31863 INFO Train loss: 0.267234
+2022-04-13 11:56:02,584 P31863 INFO ************ Epoch=3 end ************
+2022-04-13 11:56:07,801 P31863 INFO [Metrics] AUC: 0.972068 - logloss: 0.210400
+2022-04-13 11:56:07,801 P31863 INFO Save best model: monitor(max): 0.972068
+2022-04-13 11:56:07,810 P31863 INFO --- 50/50 batches finished ---
+2022-04-13 11:56:07,852 P31863 INFO Train loss: 0.241815
+2022-04-13 11:56:07,852 P31863 INFO ************ Epoch=4 end ************
+2022-04-13 11:56:13,194 P31863 INFO [Metrics] AUC: 0.973752 - logloss: 0.220257
+2022-04-13 11:56:13,195 P31863 INFO Save best model: monitor(max): 0.973752
+2022-04-13 11:56:13,201 P31863 INFO --- 50/50 batches finished ---
+2022-04-13 11:56:13,242 P31863 INFO Train loss: 0.227181
+2022-04-13 11:56:13,243 P31863 INFO ************ Epoch=5 end ************
+2022-04-13 11:56:18,483 P31863 INFO [Metrics] AUC: 0.974975 - logloss: 0.189582
+2022-04-13 11:56:18,484 P31863 INFO Save best model: monitor(max): 0.974975
+2022-04-13 11:56:18,490 P31863 INFO --- 50/50 batches finished ---
+2022-04-13 11:56:18,535 P31863 INFO Train loss: 0.219095
+2022-04-13 11:56:18,535 P31863 INFO ************ Epoch=6 end ************
+2022-04-13 11:56:23,707 P31863 INFO [Metrics] AUC: 0.976341 - logloss: 0.204962
+2022-04-13 11:56:23,709 P31863 INFO Save best model: monitor(max): 0.976341
+2022-04-13 11:56:23,718 P31863 INFO --- 50/50 batches finished ---
+2022-04-13 11:56:23,761 P31863 INFO Train loss: 0.212992
+2022-04-13 11:56:23,761 P31863 INFO ************ Epoch=7 end ************
+2022-04-13 11:56:28,970 P31863 INFO [Metrics] AUC: 0.976750 - logloss: 0.180353
+2022-04-13 11:56:28,971 P31863 INFO Save best model: monitor(max): 0.976750
+2022-04-13 11:56:28,979 P31863 INFO --- 50/50 batches finished ---
+2022-04-13 11:56:29,016 P31863 INFO Train loss: 0.210784
+2022-04-13 11:56:29,016 P31863 INFO ************ Epoch=8 end ************
+2022-04-13 11:56:34,938 P31863 INFO [Metrics] AUC: 0.977987 - logloss: 0.182927
+2022-04-13 11:56:34,939 P31863 INFO Save best model: monitor(max): 0.977987
+2022-04-13 11:56:34,946 P31863 INFO --- 50/50 batches finished ---
+2022-04-13 11:56:34,992 P31863 INFO Train loss: 0.204913
+2022-04-13 11:56:34,992 P31863 INFO ************ Epoch=9 end ************
+2022-04-13 11:56:41,175 P31863 INFO [Metrics] AUC: 0.976822 - logloss: 0.199739
+2022-04-13 11:56:41,176 P31863 INFO Monitor(max) STOP: 0.976822 !
+2022-04-13 11:56:41,176 P31863 INFO Reduce learning rate on plateau: 0.000100
+2022-04-13 11:56:41,176 P31863 INFO --- 50/50 batches finished ---
+2022-04-13 11:56:41,221 P31863 INFO Train loss: 0.205779
+2022-04-13 11:56:41,221 P31863 INFO ************ Epoch=10 end ************
+2022-04-13 11:56:47,388 P31863 INFO [Metrics] AUC: 0.982380 - logloss: 0.158879
+2022-04-13 11:56:47,389 P31863 INFO Save best model: monitor(max): 0.982380
+2022-04-13 11:56:47,408 P31863 INFO --- 50/50 batches finished ---
+2022-04-13 11:56:47,469 P31863 INFO Train loss: 0.173535
+2022-04-13 11:56:47,469 P31863 INFO ************ Epoch=11 end ************
+2022-04-13 11:56:53,624 P31863 INFO [Metrics] AUC: 0.983463 - logloss: 0.158115
+2022-04-13 11:56:53,625 P31863 INFO Save best model: monitor(max): 0.983463
+2022-04-13 11:56:53,634 P31863 INFO --- 50/50 batches finished ---
+2022-04-13 11:56:53,681 P31863 INFO Train loss: 0.144106
+2022-04-13 11:56:53,681 P31863 INFO ************ Epoch=12 end ************
+2022-04-13 11:56:59,380 P31863 INFO [Metrics] AUC: 0.984005 - logloss: 0.159751
+2022-04-13 11:56:59,381 P31863 INFO Save best model: monitor(max): 0.984005
+2022-04-13 11:56:59,387 P31863 INFO --- 50/50 batches finished ---
+2022-04-13 11:56:59,438 P31863 INFO Train loss: 0.126666
+2022-04-13 11:56:59,438 P31863 INFO ************ Epoch=13 end ************
+2022-04-13 11:57:04,229 P31863 INFO [Metrics] AUC: 0.984054 - logloss: 0.165974
+2022-04-13 11:57:04,230 P31863 INFO Save best model: monitor(max): 0.984054
+2022-04-13 11:57:04,236 P31863 INFO --- 50/50 batches finished ---
+2022-04-13 11:57:04,274 P31863 INFO Train loss: 0.113567
+2022-04-13 11:57:04,274 P31863 INFO ************ Epoch=14 end ************
+2022-04-13 11:57:08,970 P31863 INFO [Metrics] AUC: 0.984203 - logloss: 0.169526
+2022-04-13 11:57:08,970 P31863 INFO Save best model: monitor(max): 0.984203
+2022-04-13 11:57:08,976 P31863 INFO --- 50/50 batches finished ---
+2022-04-13 11:57:09,015 P31863 INFO Train loss: 0.102512
+2022-04-13 11:57:09,015 P31863 INFO ************ Epoch=15 end ************
+2022-04-13 11:57:14,990 P31863 INFO [Metrics] AUC: 0.984106 - logloss: 0.174788
+2022-04-13 11:57:14,990 P31863 INFO Monitor(max) STOP: 0.984106 !
+2022-04-13 11:57:14,990 P31863 INFO Reduce learning rate on plateau: 0.000010
+2022-04-13 11:57:14,991 P31863 INFO --- 50/50 batches finished ---
+2022-04-13 11:57:15,030 P31863 INFO Train loss: 0.095254
+2022-04-13 11:57:15,030 P31863 INFO ************ Epoch=16 end ************
+2022-04-13 11:57:19,600 P31863 INFO [Metrics] AUC: 0.984262 - logloss: 0.175753
+2022-04-13 11:57:19,601 P31863 INFO Save best model: monitor(max): 0.984262
+2022-04-13 11:57:19,610 P31863 INFO --- 50/50 batches finished ---
+2022-04-13 11:57:19,646 P31863 INFO Train loss: 0.087010
+2022-04-13 11:57:19,646 P31863 INFO ************ Epoch=17 end ************
+2022-04-13 11:57:24,089 P31863 INFO [Metrics] AUC: 0.984265 - logloss: 0.177004
+2022-04-13 11:57:24,090 P31863 INFO Save best model: monitor(max): 0.984265
+2022-04-13 11:57:24,099 P31863 INFO --- 50/50 batches finished ---
+2022-04-13 11:57:24,141 P31863 INFO Train loss: 0.084949
+2022-04-13 11:57:24,141 P31863 INFO ************ Epoch=18 end ************
+2022-04-13 11:57:28,611 P31863 INFO [Metrics] AUC: 0.984269 - logloss: 0.176973
+2022-04-13 11:57:28,612 P31863 INFO Save best model: monitor(max): 0.984269
+2022-04-13 11:57:28,620 P31863 INFO --- 50/50 batches finished ---
+2022-04-13 11:57:28,666 P31863 INFO Train loss: 0.084518
+2022-04-13 11:57:28,666 P31863 INFO ************ Epoch=19 end ************
+2022-04-13 11:57:34,168 P31863 INFO [Metrics] AUC: 0.984280 - logloss: 0.178141
+2022-04-13 11:57:34,168 P31863 INFO Save best model: monitor(max): 0.984280
+2022-04-13 11:57:34,174 P31863 INFO --- 50/50 batches finished ---
+2022-04-13 11:57:34,225 P31863 INFO Train loss: 0.083657
+2022-04-13 11:57:34,225 P31863 INFO ************ Epoch=20 end ************
+2022-04-13 11:57:39,887 P31863 INFO [Metrics] AUC: 0.984285 - logloss: 0.179113
+2022-04-13 11:57:39,888 P31863 INFO Save best model: monitor(max): 0.984285
+2022-04-13 11:57:39,896 P31863 INFO --- 50/50 batches finished ---
+2022-04-13 11:57:39,940 P31863 INFO Train loss: 0.082723
+2022-04-13 11:57:39,940 P31863 INFO ************ Epoch=21 end ************
+2022-04-13 11:57:45,701 P31863 INFO [Metrics] AUC: 0.984264 - logloss: 0.179959
+2022-04-13 11:57:45,702 P31863 INFO Monitor(max) STOP: 0.984264 !
+2022-04-13 11:57:45,702 P31863 INFO Reduce learning rate on plateau: 0.000001
+2022-04-13 11:57:45,702 P31863 INFO --- 50/50 batches finished ---
+2022-04-13 11:57:45,746 P31863 INFO Train loss: 0.081612
+2022-04-13 11:57:45,746 P31863 INFO ************ Epoch=22 end ************
+2022-04-13 11:57:51,619 P31863 INFO [Metrics] AUC: 0.984255 - logloss: 0.180367
+2022-04-13 11:57:51,620 P31863 INFO Monitor(max) STOP: 0.984255 !
+2022-04-13 11:57:51,620 P31863 INFO Reduce learning rate on plateau: 0.000001
+2022-04-13 11:57:51,621 P31863 INFO Early stopping at epoch=23
+2022-04-13 11:57:51,621 P31863 INFO --- 50/50 batches finished ---
+2022-04-13 11:57:51,662 P31863 INFO Train loss: 0.080517
+2022-04-13 11:57:51,662 P31863 INFO Training finished.
+2022-04-13 11:57:51,662 P31863 INFO Load best model: /home/XXX/FuxiCTR/benchmarks/Frappe/DSSM_frappe_x1/frappe_x1_db5a3f58/DSSM_frappe_x1_006_4be388eb.model
+2022-04-13 11:57:55,845 P31863 INFO ****** Validation evaluation ******
+2022-04-13 11:57:56,255 P31863 INFO [Metrics] AUC: 0.984285 - logloss: 0.179113
+2022-04-13 11:57:56,314 P31863 INFO ******** Test evaluation ********
+2022-04-13 11:57:56,314 P31863 INFO Loading data...
+2022-04-13 11:57:56,315 P31863 INFO Loading data from h5: ../data/Frappe/frappe_x1_db5a3f58/test.h5
+2022-04-13 11:57:56,318 P31863 INFO Test samples: total/28860, pos/9536, neg/19324, ratio/33.04%, blocks/1
+2022-04-13 11:57:56,319 P31863 INFO Loading test data done.
+2022-04-13 11:57:56,617 P31863 INFO [Metrics] AUC: 0.983221 - logloss: 0.187438
+
+```
diff --git a/ranking/ctr/DSSM/DSSM_movielenslatest_x1/README.md b/ranking/ctr/DSSM/DSSM_movielenslatest_x1/README.md
index 5fe1766d..22f00ee3 100644
--- a/ranking/ctr/DSSM/DSSM_movielenslatest_x1/README.md
+++ b/ranking/ctr/DSSM/DSSM_movielenslatest_x1/README.md
@@ -1,207 +1,207 @@
-## DSSM_movielenslatest_x1
-
-A hands-on guide to run the DSSM model on the MovielensLatest_x1 dataset.
-
-Author: [XUEPAI](https://github.com/xue-pai)
-
-### Index
-[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
-
-### Environments
-+ Hardware
-
- ```python
- CPU: Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.6GHz
- GPU: Tesla P100 16G
- RAM: 755G
-
- ```
-
-+ Software
-
- ```python
- CUDA: 11.4
- python: 3.6.5
- pytorch: 1.0.1.post2
- pandas: 0.23.0
- numpy: 1.18.1
- scipy: 1.1.0
- sklearn: 0.23.1
- pyyaml: 5.1
- h5py: 2.7.1
- tqdm: 4.59.0
- fuxictr: 1.2.1
- ```
-
-### Dataset
-Dataset ID: [MovielensLatest_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/MovieLens#MovielensLatest_x1). Please refer to the dataset details to get data ready.
-
-### Code
-
-We use [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [DSSM](https://github.com/xue-pai/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/DSSM.py).
-
-Running steps:
-
-1. Download [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
-
- ```python
- sys.path.append('YOUR_PATH_TO_FuxiCTR/')
- ```
-
-2. Create a data directory and put the downloaded csv files in `../data/Movielens/MovielensLatest_x1`.
-
-3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [DSSM_movielenslatest_x1_tuner_config_01](./DSSM_movielenslatest_x1_tuner_config_01). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
-
-4. Run the following script to start.
-
- ```bash
- cd DSSM_movielenslatest_x1
- nohup python run_expid.py --config ./DSSM_movielenslatest_x1_tuner_config_01 --expid DSSM_movielenslatest_x1_001_945a31b2 --gpu 0 > run.log &
- tail -f run.log
- ```
-
-### Results
-
-| AUC | logloss |
-|:--------------------:|:--------------------:|
-| 0.968624 | 0.212998 |
-
-
-### Logs
-```python
-2022-06-13 08:57:52,198 P303 INFO {
- "batch_norm": "True",
- "batch_size": "4096",
- "data_format": "csv",
- "data_root": "../data/Movielens/",
- "dataset_id": "movielenslatest_x1_06dcf7a5",
- "debug": "False",
- "embedding_dim": "10",
- "embedding_regularizer": "0.01",
- "epochs": "100",
- "every_x_epochs": "1",
- "feature_cols": "[{'active': True, 'dtype': 'float', 'name': 'user_id', 'source': 'user', 'type': 'categorical'}, {'active': True, 'dtype': 'float', 'name': ['item_id', 'tag_id'], 'source': 'item', 'type': 'categorical'}]",
- "gpu": "0",
- "item_tower_activations": "ReLU",
- "item_tower_dropout": "0.2",
- "item_tower_units": "[400, 400, 400]",
- "label_col": "{'dtype': 'float', 'name': 'label'}",
- "learning_rate": "0.001",
- "loss": "binary_crossentropy",
- "metrics": "['AUC', 'logloss']",
- "min_categr_count": "1",
- "model": "DSSM",
- "model_id": "DSSM_movielenslatest_x1_001_945a31b2",
- "model_root": "./Movielens/DSSM_movielenslatest_x1/",
- "monitor": "AUC",
- "monitor_mode": "max",
- "net_regularizer": "0",
- "num_workers": "3",
- "optimizer": "adam",
- "patience": "2",
- "pickle_feature_encoder": "True",
- "save_best_only": "True",
- "seed": "2021",
- "shuffle": "True",
- "task": "binary_classification",
- "test_data": "../data/Movielens/MovielensLatest_x1/test.csv",
- "train_data": "../data/Movielens/MovielensLatest_x1/train.csv",
- "use_hdf5": "True",
- "user_tower_activations": "ReLU",
- "user_tower_dropout": "0.2",
- "user_tower_units": "[400, 400, 400]",
- "valid_data": "../data/Movielens/MovielensLatest_x1/valid.csv",
- "verbose": "1",
- "version": "pytorch"
-}
-2022-06-13 08:57:52,199 P303 INFO Set up feature encoder...
-2022-06-13 08:57:52,200 P303 INFO Load feature_map from json: ../data/Movielens/movielenslatest_x1_06dcf7a5/feature_map.json
-2022-06-13 08:57:52,200 P303 INFO Loading data...
-2022-06-13 08:57:52,203 P303 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_06dcf7a5/train.h5
-2022-06-13 08:57:52,232 P303 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_06dcf7a5/valid.h5
-2022-06-13 08:57:52,244 P303 INFO Train samples: total/1404801, pos/467878, neg/936923, ratio/33.31%, blocks/1
-2022-06-13 08:57:52,244 P303 INFO Validation samples: total/401372, pos/134225, neg/267147, ratio/33.44%, blocks/1
-2022-06-13 08:57:52,244 P303 INFO Loading train data done.
-2022-06-13 08:57:55,243 P303 INFO Total number of parameters: 1559990.
-2022-06-13 08:57:55,244 P303 INFO Start training: 343 batches/epoch
-2022-06-13 08:57:55,244 P303 INFO ************ Epoch=1 start ************
-2022-06-13 08:58:13,835 P303 INFO [Metrics] AUC: 0.933130 - logloss: 0.298132
-2022-06-13 08:58:13,836 P303 INFO Save best model: monitor(max): 0.933130
-2022-06-13 08:58:13,850 P303 INFO --- 343/343 batches finished ---
-2022-06-13 08:58:13,908 P303 INFO Train loss: 0.455415
-2022-06-13 08:58:13,908 P303 INFO ************ Epoch=1 end ************
-2022-06-13 08:58:32,351 P303 INFO [Metrics] AUC: 0.939931 - logloss: 0.296643
-2022-06-13 08:58:32,352 P303 INFO Save best model: monitor(max): 0.939931
-2022-06-13 08:58:32,365 P303 INFO --- 343/343 batches finished ---
-2022-06-13 08:58:32,435 P303 INFO Train loss: 0.363026
-2022-06-13 08:58:32,435 P303 INFO ************ Epoch=2 end ************
-2022-06-13 08:58:50,371 P303 INFO [Metrics] AUC: 0.946660 - logloss: 0.281723
-2022-06-13 08:58:50,372 P303 INFO Save best model: monitor(max): 0.946660
-2022-06-13 08:58:50,385 P303 INFO --- 343/343 batches finished ---
-2022-06-13 08:58:50,447 P303 INFO Train loss: 0.370799
-2022-06-13 08:58:50,447 P303 INFO ************ Epoch=3 end ************
-2022-06-13 08:59:08,319 P303 INFO [Metrics] AUC: 0.948210 - logloss: 0.265915
-2022-06-13 08:59:08,320 P303 INFO Save best model: monitor(max): 0.948210
-2022-06-13 08:59:08,332 P303 INFO --- 343/343 batches finished ---
-2022-06-13 08:59:08,415 P303 INFO Train loss: 0.373187
-2022-06-13 08:59:08,415 P303 INFO ************ Epoch=4 end ************
-2022-06-13 08:59:26,250 P303 INFO [Metrics] AUC: 0.951196 - logloss: 0.249135
-2022-06-13 08:59:26,252 P303 INFO Save best model: monitor(max): 0.951196
-2022-06-13 08:59:26,265 P303 INFO --- 343/343 batches finished ---
-2022-06-13 08:59:26,314 P303 INFO Train loss: 0.374471
-2022-06-13 08:59:26,314 P303 INFO ************ Epoch=5 end ************
-2022-06-13 08:59:44,485 P303 INFO [Metrics] AUC: 0.953276 - logloss: 0.243052
-2022-06-13 08:59:44,486 P303 INFO Save best model: monitor(max): 0.953276
-2022-06-13 08:59:44,499 P303 INFO --- 343/343 batches finished ---
-2022-06-13 08:59:44,548 P303 INFO Train loss: 0.378173
-2022-06-13 08:59:44,548 P303 INFO ************ Epoch=6 end ************
-2022-06-13 09:00:02,650 P303 INFO [Metrics] AUC: 0.954119 - logloss: 0.240459
-2022-06-13 09:00:02,651 P303 INFO Save best model: monitor(max): 0.954119
-2022-06-13 09:00:02,661 P303 INFO --- 343/343 batches finished ---
-2022-06-13 09:00:02,718 P303 INFO Train loss: 0.379224
-2022-06-13 09:00:02,718 P303 INFO ************ Epoch=7 end ************
-2022-06-13 09:00:19,077 P303 INFO [Metrics] AUC: 0.954879 - logloss: 0.239145
-2022-06-13 09:00:19,078 P303 INFO Save best model: monitor(max): 0.954879
-2022-06-13 09:00:19,093 P303 INFO --- 343/343 batches finished ---
-2022-06-13 09:00:19,134 P303 INFO Train loss: 0.379803
-2022-06-13 09:00:19,134 P303 INFO ************ Epoch=8 end ************
-2022-06-13 09:00:34,525 P303 INFO [Metrics] AUC: 0.946124 - logloss: 0.312645
-2022-06-13 09:00:34,526 P303 INFO Monitor(max) STOP: 0.946124 !
-2022-06-13 09:00:34,526 P303 INFO Reduce learning rate on plateau: 0.000100
-2022-06-13 09:00:34,526 P303 INFO --- 343/343 batches finished ---
-2022-06-13 09:00:34,569 P303 INFO Train loss: 0.382595
-2022-06-13 09:00:34,569 P303 INFO ************ Epoch=9 end ************
-2022-06-13 09:00:49,796 P303 INFO [Metrics] AUC: 0.967127 - logloss: 0.208134
-2022-06-13 09:00:49,797 P303 INFO Save best model: monitor(max): 0.967127
-2022-06-13 09:00:49,806 P303 INFO --- 343/343 batches finished ---
-2022-06-13 09:00:49,859 P303 INFO Train loss: 0.285253
-2022-06-13 09:00:49,860 P303 INFO ************ Epoch=10 end ************
-2022-06-13 09:01:05,454 P303 INFO [Metrics] AUC: 0.969027 - logloss: 0.211825
-2022-06-13 09:01:05,454 P303 INFO Save best model: monitor(max): 0.969027
-2022-06-13 09:01:05,467 P303 INFO --- 343/343 batches finished ---
-2022-06-13 09:01:05,525 P303 INFO Train loss: 0.197720
-2022-06-13 09:01:05,525 P303 INFO ************ Epoch=11 end ************
-2022-06-13 09:01:20,880 P303 INFO [Metrics] AUC: 0.968178 - logloss: 0.234724
-2022-06-13 09:01:20,881 P303 INFO Monitor(max) STOP: 0.968178 !
-2022-06-13 09:01:20,881 P303 INFO Reduce learning rate on plateau: 0.000010
-2022-06-13 09:01:20,881 P303 INFO --- 343/343 batches finished ---
-2022-06-13 09:01:20,935 P303 INFO Train loss: 0.152829
-2022-06-13 09:01:20,935 P303 INFO ************ Epoch=12 end ************
-2022-06-13 09:01:32,498 P303 INFO [Metrics] AUC: 0.967691 - logloss: 0.269013
-2022-06-13 09:01:32,499 P303 INFO Monitor(max) STOP: 0.967691 !
-2022-06-13 09:01:32,499 P303 INFO Reduce learning rate on plateau: 0.000001
-2022-06-13 09:01:32,499 P303 INFO Early stopping at epoch=13
-2022-06-13 09:01:32,500 P303 INFO --- 343/343 batches finished ---
-2022-06-13 09:01:32,566 P303 INFO Train loss: 0.118317
-2022-06-13 09:01:32,567 P303 INFO Training finished.
-2022-06-13 09:01:32,567 P303 INFO Load best model: /home/XXX/FuxiCTR/benchmarks/Movielens/DSSM_movielenslatest_x1/movielenslatest_x1_06dcf7a5/DSSM_movielenslatest_x1_001_945a31b2.model
-2022-06-13 09:01:32,612 P303 INFO ****** Validation evaluation ******
-2022-06-13 09:01:34,111 P303 INFO [Metrics] AUC: 0.969027 - logloss: 0.211825
-2022-06-13 09:01:34,162 P303 INFO ******** Test evaluation ********
-2022-06-13 09:01:34,162 P303 INFO Loading data...
-2022-06-13 09:01:34,163 P303 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_06dcf7a5/test.h5
-2022-06-13 09:01:34,168 P303 INFO Test samples: total/200686, pos/66850, neg/133836, ratio/33.31%, blocks/1
-2022-06-13 09:01:34,168 P303 INFO Loading test data done.
-2022-06-13 09:01:34,867 P303 INFO [Metrics] AUC: 0.968624 - logloss: 0.212998
-
-```
+## DSSM_movielenslatest_x1
+
+A hands-on guide to run the DSSM model on the MovielensLatest_x1 dataset.
+
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
+
+### Index
+[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
+
+### Environments
++ Hardware
+
+ ```python
+ CPU: Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.6GHz
+ GPU: Tesla P100 16G
+ RAM: 755G
+
+ ```
+
++ Software
+
+ ```python
+ CUDA: 11.4
+ python: 3.6.5
+ pytorch: 1.0.1.post2
+ pandas: 0.23.0
+ numpy: 1.18.1
+ scipy: 1.1.0
+ sklearn: 0.23.1
+ pyyaml: 5.1
+ h5py: 2.7.1
+ tqdm: 4.59.0
+ fuxictr: 1.2.1
+ ```
+
+### Dataset
+Dataset ID: [MovielensLatest_x1](https://github.com/reczoo/Datasets/tree/main/MovieLens/MovielensLatest_x1). Please refer to the dataset details to get data ready.
+
+### Code
+
+We use [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [DSSM](https://github.com/reczoo/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/DSSM.py).
+
+Running steps:
+
+1. Download [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+
+ ```python
+ sys.path.append('YOUR_PATH_TO_FuxiCTR/')
+ ```
+
+2. Create a data directory and put the downloaded csv files in `../data/Movielens/MovielensLatest_x1`.
+
+3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [DSSM_movielenslatest_x1_tuner_config_01](./DSSM_movielenslatest_x1_tuner_config_01). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
+
+4. Run the following script to start.
+
+ ```bash
+ cd DSSM_movielenslatest_x1
+ nohup python run_expid.py --config ./DSSM_movielenslatest_x1_tuner_config_01 --expid DSSM_movielenslatest_x1_001_945a31b2 --gpu 0 > run.log &
+ tail -f run.log
+ ```
+
+### Results
+
+| AUC | logloss |
+|:--------------------:|:--------------------:|
+| 0.968624 | 0.212998 |
+
+
+### Logs
+```python
+2022-06-13 08:57:52,198 P303 INFO {
+ "batch_norm": "True",
+ "batch_size": "4096",
+ "data_format": "csv",
+ "data_root": "../data/Movielens/",
+ "dataset_id": "movielenslatest_x1_06dcf7a5",
+ "debug": "False",
+ "embedding_dim": "10",
+ "embedding_regularizer": "0.01",
+ "epochs": "100",
+ "every_x_epochs": "1",
+ "feature_cols": "[{'active': True, 'dtype': 'float', 'name': 'user_id', 'source': 'user', 'type': 'categorical'}, {'active': True, 'dtype': 'float', 'name': ['item_id', 'tag_id'], 'source': 'item', 'type': 'categorical'}]",
+ "gpu": "0",
+ "item_tower_activations": "ReLU",
+ "item_tower_dropout": "0.2",
+ "item_tower_units": "[400, 400, 400]",
+ "label_col": "{'dtype': 'float', 'name': 'label'}",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "metrics": "['AUC', 'logloss']",
+ "min_categr_count": "1",
+ "model": "DSSM",
+ "model_id": "DSSM_movielenslatest_x1_001_945a31b2",
+ "model_root": "./Movielens/DSSM_movielenslatest_x1/",
+ "monitor": "AUC",
+ "monitor_mode": "max",
+ "net_regularizer": "0",
+ "num_workers": "3",
+ "optimizer": "adam",
+ "patience": "2",
+ "pickle_feature_encoder": "True",
+ "save_best_only": "True",
+ "seed": "2021",
+ "shuffle": "True",
+ "task": "binary_classification",
+ "test_data": "../data/Movielens/MovielensLatest_x1/test.csv",
+ "train_data": "../data/Movielens/MovielensLatest_x1/train.csv",
+ "use_hdf5": "True",
+ "user_tower_activations": "ReLU",
+ "user_tower_dropout": "0.2",
+ "user_tower_units": "[400, 400, 400]",
+ "valid_data": "../data/Movielens/MovielensLatest_x1/valid.csv",
+ "verbose": "1",
+ "version": "pytorch"
+}
+2022-06-13 08:57:52,199 P303 INFO Set up feature encoder...
+2022-06-13 08:57:52,200 P303 INFO Load feature_map from json: ../data/Movielens/movielenslatest_x1_06dcf7a5/feature_map.json
+2022-06-13 08:57:52,200 P303 INFO Loading data...
+2022-06-13 08:57:52,203 P303 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_06dcf7a5/train.h5
+2022-06-13 08:57:52,232 P303 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_06dcf7a5/valid.h5
+2022-06-13 08:57:52,244 P303 INFO Train samples: total/1404801, pos/467878, neg/936923, ratio/33.31%, blocks/1
+2022-06-13 08:57:52,244 P303 INFO Validation samples: total/401372, pos/134225, neg/267147, ratio/33.44%, blocks/1
+2022-06-13 08:57:52,244 P303 INFO Loading train data done.
+2022-06-13 08:57:55,243 P303 INFO Total number of parameters: 1559990.
+2022-06-13 08:57:55,244 P303 INFO Start training: 343 batches/epoch
+2022-06-13 08:57:55,244 P303 INFO ************ Epoch=1 start ************
+2022-06-13 08:58:13,835 P303 INFO [Metrics] AUC: 0.933130 - logloss: 0.298132
+2022-06-13 08:58:13,836 P303 INFO Save best model: monitor(max): 0.933130
+2022-06-13 08:58:13,850 P303 INFO --- 343/343 batches finished ---
+2022-06-13 08:58:13,908 P303 INFO Train loss: 0.455415
+2022-06-13 08:58:13,908 P303 INFO ************ Epoch=1 end ************
+2022-06-13 08:58:32,351 P303 INFO [Metrics] AUC: 0.939931 - logloss: 0.296643
+2022-06-13 08:58:32,352 P303 INFO Save best model: monitor(max): 0.939931
+2022-06-13 08:58:32,365 P303 INFO --- 343/343 batches finished ---
+2022-06-13 08:58:32,435 P303 INFO Train loss: 0.363026
+2022-06-13 08:58:32,435 P303 INFO ************ Epoch=2 end ************
+2022-06-13 08:58:50,371 P303 INFO [Metrics] AUC: 0.946660 - logloss: 0.281723
+2022-06-13 08:58:50,372 P303 INFO Save best model: monitor(max): 0.946660
+2022-06-13 08:58:50,385 P303 INFO --- 343/343 batches finished ---
+2022-06-13 08:58:50,447 P303 INFO Train loss: 0.370799
+2022-06-13 08:58:50,447 P303 INFO ************ Epoch=3 end ************
+2022-06-13 08:59:08,319 P303 INFO [Metrics] AUC: 0.948210 - logloss: 0.265915
+2022-06-13 08:59:08,320 P303 INFO Save best model: monitor(max): 0.948210
+2022-06-13 08:59:08,332 P303 INFO --- 343/343 batches finished ---
+2022-06-13 08:59:08,415 P303 INFO Train loss: 0.373187
+2022-06-13 08:59:08,415 P303 INFO ************ Epoch=4 end ************
+2022-06-13 08:59:26,250 P303 INFO [Metrics] AUC: 0.951196 - logloss: 0.249135
+2022-06-13 08:59:26,252 P303 INFO Save best model: monitor(max): 0.951196
+2022-06-13 08:59:26,265 P303 INFO --- 343/343 batches finished ---
+2022-06-13 08:59:26,314 P303 INFO Train loss: 0.374471
+2022-06-13 08:59:26,314 P303 INFO ************ Epoch=5 end ************
+2022-06-13 08:59:44,485 P303 INFO [Metrics] AUC: 0.953276 - logloss: 0.243052
+2022-06-13 08:59:44,486 P303 INFO Save best model: monitor(max): 0.953276
+2022-06-13 08:59:44,499 P303 INFO --- 343/343 batches finished ---
+2022-06-13 08:59:44,548 P303 INFO Train loss: 0.378173
+2022-06-13 08:59:44,548 P303 INFO ************ Epoch=6 end ************
+2022-06-13 09:00:02,650 P303 INFO [Metrics] AUC: 0.954119 - logloss: 0.240459
+2022-06-13 09:00:02,651 P303 INFO Save best model: monitor(max): 0.954119
+2022-06-13 09:00:02,661 P303 INFO --- 343/343 batches finished ---
+2022-06-13 09:00:02,718 P303 INFO Train loss: 0.379224
+2022-06-13 09:00:02,718 P303 INFO ************ Epoch=7 end ************
+2022-06-13 09:00:19,077 P303 INFO [Metrics] AUC: 0.954879 - logloss: 0.239145
+2022-06-13 09:00:19,078 P303 INFO Save best model: monitor(max): 0.954879
+2022-06-13 09:00:19,093 P303 INFO --- 343/343 batches finished ---
+2022-06-13 09:00:19,134 P303 INFO Train loss: 0.379803
+2022-06-13 09:00:19,134 P303 INFO ************ Epoch=8 end ************
+2022-06-13 09:00:34,525 P303 INFO [Metrics] AUC: 0.946124 - logloss: 0.312645
+2022-06-13 09:00:34,526 P303 INFO Monitor(max) STOP: 0.946124 !
+2022-06-13 09:00:34,526 P303 INFO Reduce learning rate on plateau: 0.000100
+2022-06-13 09:00:34,526 P303 INFO --- 343/343 batches finished ---
+2022-06-13 09:00:34,569 P303 INFO Train loss: 0.382595
+2022-06-13 09:00:34,569 P303 INFO ************ Epoch=9 end ************
+2022-06-13 09:00:49,796 P303 INFO [Metrics] AUC: 0.967127 - logloss: 0.208134
+2022-06-13 09:00:49,797 P303 INFO Save best model: monitor(max): 0.967127
+2022-06-13 09:00:49,806 P303 INFO --- 343/343 batches finished ---
+2022-06-13 09:00:49,859 P303 INFO Train loss: 0.285253
+2022-06-13 09:00:49,860 P303 INFO ************ Epoch=10 end ************
+2022-06-13 09:01:05,454 P303 INFO [Metrics] AUC: 0.969027 - logloss: 0.211825
+2022-06-13 09:01:05,454 P303 INFO Save best model: monitor(max): 0.969027
+2022-06-13 09:01:05,467 P303 INFO --- 343/343 batches finished ---
+2022-06-13 09:01:05,525 P303 INFO Train loss: 0.197720
+2022-06-13 09:01:05,525 P303 INFO ************ Epoch=11 end ************
+2022-06-13 09:01:20,880 P303 INFO [Metrics] AUC: 0.968178 - logloss: 0.234724
+2022-06-13 09:01:20,881 P303 INFO Monitor(max) STOP: 0.968178 !
+2022-06-13 09:01:20,881 P303 INFO Reduce learning rate on plateau: 0.000010
+2022-06-13 09:01:20,881 P303 INFO --- 343/343 batches finished ---
+2022-06-13 09:01:20,935 P303 INFO Train loss: 0.152829
+2022-06-13 09:01:20,935 P303 INFO ************ Epoch=12 end ************
+2022-06-13 09:01:32,498 P303 INFO [Metrics] AUC: 0.967691 - logloss: 0.269013
+2022-06-13 09:01:32,499 P303 INFO Monitor(max) STOP: 0.967691 !
+2022-06-13 09:01:32,499 P303 INFO Reduce learning rate on plateau: 0.000001
+2022-06-13 09:01:32,499 P303 INFO Early stopping at epoch=13
+2022-06-13 09:01:32,500 P303 INFO --- 343/343 batches finished ---
+2022-06-13 09:01:32,566 P303 INFO Train loss: 0.118317
+2022-06-13 09:01:32,567 P303 INFO Training finished.
+2022-06-13 09:01:32,567 P303 INFO Load best model: /home/XXX/FuxiCTR/benchmarks/Movielens/DSSM_movielenslatest_x1/movielenslatest_x1_06dcf7a5/DSSM_movielenslatest_x1_001_945a31b2.model
+2022-06-13 09:01:32,612 P303 INFO ****** Validation evaluation ******
+2022-06-13 09:01:34,111 P303 INFO [Metrics] AUC: 0.969027 - logloss: 0.211825
+2022-06-13 09:01:34,162 P303 INFO ******** Test evaluation ********
+2022-06-13 09:01:34,162 P303 INFO Loading data...
+2022-06-13 09:01:34,163 P303 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_06dcf7a5/test.h5
+2022-06-13 09:01:34,168 P303 INFO Test samples: total/200686, pos/66850, neg/133836, ratio/33.31%, blocks/1
+2022-06-13 09:01:34,168 P303 INFO Loading test data done.
+2022-06-13 09:01:34,867 P303 INFO [Metrics] AUC: 0.968624 - logloss: 0.212998
+
+```
diff --git a/ranking/ctr/DeepCrossing/DeepCross_avazu_x4_001/README.md b/ranking/ctr/DeepCrossing/DeepCross_avazu_x4_001/README.md
index e030fefa..9c4b8f1f 100644
--- a/ranking/ctr/DeepCrossing/DeepCross_avazu_x4_001/README.md
+++ b/ranking/ctr/DeepCrossing/DeepCross_avazu_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DeepCrossing model on the Avazu_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DeepCrossing](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DeepCrossing.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DeepCrossing](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DeepCrossing.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DeepCrossing/DeepCross_avazu_x4_002/README.md b/ranking/ctr/DeepCrossing/DeepCross_avazu_x4_002/README.md
index 1d780154..2374727e 100644
--- a/ranking/ctr/DeepCrossing/DeepCross_avazu_x4_002/README.md
+++ b/ranking/ctr/DeepCrossing/DeepCross_avazu_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DeepCrossing model on the Avazu_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DeepCrossing](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DeepCrossing.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DeepCrossing](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DeepCrossing.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DeepCrossing/DeepCross_criteo_x4_001/README.md b/ranking/ctr/DeepCrossing/DeepCross_criteo_x4_001/README.md
index c78d8784..95d1e2c9 100644
--- a/ranking/ctr/DeepCrossing/DeepCross_criteo_x4_001/README.md
+++ b/ranking/ctr/DeepCrossing/DeepCross_criteo_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DeepCrossing model on the Criteo_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DeepCrossing](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DeepCrossing.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DeepCrossing](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DeepCrossing.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DeepCrossing/DeepCross_criteo_x4_002/README.md b/ranking/ctr/DeepCrossing/DeepCross_criteo_x4_002/README.md
index ac353c71..a9249435 100644
--- a/ranking/ctr/DeepCrossing/DeepCross_criteo_x4_002/README.md
+++ b/ranking/ctr/DeepCrossing/DeepCross_criteo_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DeepCrossing model on the Criteo_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DeepCrossing](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DeepCrossing.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DeepCrossing](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DeepCrossing.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DeepCrossing/DeepCross_kkbox_x1/README.md b/ranking/ctr/DeepCrossing/DeepCross_kkbox_x1/README.md
index 63bea5c4..0a3088b2 100644
--- a/ranking/ctr/DeepCrossing/DeepCross_kkbox_x1/README.md
+++ b/ranking/ctr/DeepCrossing/DeepCross_kkbox_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DeepCrossing model on the KKBox_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -38,11 +38,11 @@ Dataset ID: [KKBox_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pre
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DeepCrossing](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DeepCrossing.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DeepCrossing](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DeepCrossing.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DeepFM/DeepFM_amazonelectronics_x1/README.md b/ranking/ctr/DeepFM/DeepFM_amazonelectronics_x1/README.md
index fa1d196c..b06a61a3 100644
--- a/ranking/ctr/DeepFM/DeepFM_amazonelectronics_x1/README.md
+++ b/ranking/ctr/DeepFM/DeepFM_amazonelectronics_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DeepFM model on the AmazonElectronics_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [AmazonElectronics_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/Amazon#AmazonElectronics_x1) to get data ready.
+Please refer to [AmazonElectronics_x1](https://github.com/reczoo/Datasets/tree/main/Amazon/AmazonElectronics_x1) to get the dataset details.
### Code
-We use the [DeepFM](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/DeepFM) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [DeepFM](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/DeepFM) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/DeepFM/DeepFM_avazu_x1/README.md b/ranking/ctr/DeepFM/DeepFM_avazu_x1/README.md
index f2a5ef35..dc0b86ff 100644
--- a/ranking/ctr/DeepFM/DeepFM_avazu_x1/README.md
+++ b/ranking/ctr/DeepFM/DeepFM_avazu_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DeepFM model on the Avazu_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x1](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DeepFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DeepFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DeepFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DeepFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DeepFM/DeepFM_avazu_x4_001/README.md b/ranking/ctr/DeepFM/DeepFM_avazu_x4_001/README.md
index 49608e24..2d22a4ad 100644
--- a/ranking/ctr/DeepFM/DeepFM_avazu_x4_001/README.md
+++ b/ranking/ctr/DeepFM/DeepFM_avazu_x4_001/README.md
@@ -1,151 +1,151 @@
-## DeepFM_avazu_x4_001
-
-A hands-on guide to run the DeepFM model on the Avazu_x4_001 dataset.
-
-Author: [XUEPAI](https://github.com/xue-pai)
-
-### Index
-[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
-
-### Environments
-+ Hardware
-
- ```python
- CPU: Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.60GHz
- GPU: Tesla P100 16G
- RAM: 755G
-
- ```
-
-+ Software
-
- ```python
- CUDA: 10.0
- python: 3.6.5
- pytorch: 1.0.1.post2
- pandas: 0.23.0
- numpy: 1.18.1
- scipy: 1.1.0
- sklearn: 0.23.1
- pyyaml: 5.1
- h5py: 2.7.1
- tqdm: 4.59.0
- fuxictr: 1.0.2
- ```
-
-### Dataset
-Dataset ID: [Avazu_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_001). Please refer to the dataset details to get data ready.
-
-### Code
-
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DeepFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DeepFM.py).
-
-Running steps:
-
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
-
- ```python
- sys.path.append('YOUR_PATH_TO_FuxiCTR/')
- ```
-
-2. Create a data directory and put the downloaded csv files in `../data/Avazu/Avazu_x4`.
-
-3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [DeepFM_avazu_x4_tuner_config_01](./DeepFM_avazu_x4_tuner_config_01). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
-
-4. Run the following script to start.
-
- ```bash
- cd DeepFM_avazu_x4_001
- nohup python run_expid.py --config ./DeepFM_avazu_x4_tuner_config_01 --expid DeepFM_avazu_x4_019_9e93795e --gpu 0 > run.log &
- tail -f run.log
- ```
-
-### Results
-
-| logloss | AUC |
-|:--------------------:|:--------------------:|
-| 0.371901 | 0.792990 |
-
-
-### Logs
-```python
-2020-06-12 16:12:51,065 P7047 INFO {
- "batch_norm": "False",
- "batch_size": "10000",
- "data_format": "h5",
- "data_root": "../data/Avazu/",
- "dataset_id": "avazu_x4_3bbbc4c9",
- "debug": "False",
- "embedding_dim": "16",
- "embedding_dropout": "0",
- "embedding_regularizer": "0",
- "epochs": "100",
- "every_x_epochs": "1",
- "gpu": "0",
- "hidden_activations": "relu",
- "hidden_units": "[2000, 2000, 2000, 2000]",
- "learning_rate": "0.001",
- "loss": "binary_crossentropy",
- "metrics": "['logloss', 'AUC']",
- "model": "DeepFM",
- "model_id": "DeepFM_avazu_x4_3bbbc4c9_019_72fc2cd3",
- "model_root": "./Avazu/DeepFM_avazu/min2/",
- "monitor": "{'AUC': 1, 'logloss': -1}",
- "monitor_mode": "max",
- "net_dropout": "0",
- "net_regularizer": "0",
- "optimizer": "adam",
- "patience": "2",
- "pickle_feature_encoder": "True",
- "save_best_only": "True",
- "seed": "2019",
- "shuffle": "True",
- "task": "binary_classification",
- "test_data": "../data/Avazu/avazu_x4_3bbbc4c9/test.h5",
- "train_data": "../data/Avazu/avazu_x4_3bbbc4c9/train.h5",
- "use_hdf5": "True",
- "valid_data": "../data/Avazu/avazu_x4_3bbbc4c9/valid.h5",
- "verbose": "1",
- "version": "pytorch",
- "workers": "3"
-}
-2020-06-12 16:12:51,065 P7047 INFO Set up feature encoder...
-2020-06-12 16:12:51,066 P7047 INFO Load feature_map from json: ../data/Avazu/avazu_x4_3bbbc4c9/feature_map.json
-2020-06-12 16:12:52,773 P7047 INFO Total number of parameters: 76544576.
-2020-06-12 16:12:52,773 P7047 INFO Loading data...
-2020-06-12 16:12:52,775 P7047 INFO Loading data from h5: ../data/Avazu/avazu_x4_3bbbc4c9/train.h5
-2020-06-12 16:12:55,644 P7047 INFO Loading data from h5: ../data/Avazu/avazu_x4_3bbbc4c9/valid.h5
-2020-06-12 16:12:56,884 P7047 INFO Train samples: total/32343172, pos/5492052, neg/26851120, ratio/16.98%
-2020-06-12 16:12:56,993 P7047 INFO Validation samples: total/4042897, pos/686507, neg/3356390, ratio/16.98%
-2020-06-12 16:12:56,993 P7047 INFO Loading train data done.
-2020-06-12 16:12:59,774 P7047 INFO Start training: 3235 batches/epoch
-2020-06-12 16:12:59,774 P7047 INFO ************ Epoch=1 start ************
-2020-06-12 16:27:51,766 P7047 INFO [Metrics] logloss: 0.372035 - AUC: 0.792794
-2020-06-12 16:27:51,767 P7047 INFO Save best model: monitor(max): 0.420759
-2020-06-12 16:27:52,043 P7047 INFO --- 3235/3235 batches finished ---
-2020-06-12 16:27:52,088 P7047 INFO Train loss: 0.380125
-2020-06-12 16:27:52,089 P7047 INFO ************ Epoch=1 end ************
-2020-06-12 16:42:42,369 P7047 INFO [Metrics] logloss: 0.380053 - AUC: 0.789567
-2020-06-12 16:42:42,373 P7047 INFO Monitor(max) STOP: 0.409514 !
-2020-06-12 16:42:42,373 P7047 INFO Reduce learning rate on plateau: 0.000100
-2020-06-12 16:42:42,373 P7047 INFO --- 3235/3235 batches finished ---
-2020-06-12 16:42:42,423 P7047 INFO Train loss: 0.332604
-2020-06-12 16:42:42,423 P7047 INFO ************ Epoch=2 end ************
-2020-06-12 16:57:32,253 P7047 INFO [Metrics] logloss: 0.426078 - AUC: 0.776283
-2020-06-12 16:57:32,256 P7047 INFO Monitor(max) STOP: 0.350205 !
-2020-06-12 16:57:32,256 P7047 INFO Reduce learning rate on plateau: 0.000010
-2020-06-12 16:57:32,256 P7047 INFO Early stopping at epoch=3
-2020-06-12 16:57:32,256 P7047 INFO --- 3235/3235 batches finished ---
-2020-06-12 16:57:32,303 P7047 INFO Train loss: 0.290542
-2020-06-12 16:57:32,303 P7047 INFO Training finished.
-2020-06-12 16:57:32,303 P7047 INFO Load best model: /home/XXX/benchmarks/Avazu/DeepFM_avazu/min2/avazu_x4_3bbbc4c9/DeepFM_avazu_x4_3bbbc4c9_019_72fc2cd3_model.ckpt
-2020-06-12 16:57:32,684 P7047 INFO ****** Train/validation evaluation ******
-2020-06-12 16:57:55,907 P7047 INFO [Metrics] logloss: 0.372035 - AUC: 0.792794
-2020-06-12 16:57:55,955 P7047 INFO ******** Test evaluation ********
-2020-06-12 16:57:55,955 P7047 INFO Loading data...
-2020-06-12 16:57:55,955 P7047 INFO Loading data from h5: ../data/Avazu/avazu_x4_3bbbc4c9/test.h5
-2020-06-12 16:57:56,471 P7047 INFO Test samples: total/4042898, pos/686507, neg/3356391, ratio/16.98%
-2020-06-12 16:57:56,471 P7047 INFO Loading test data done.
-2020-06-12 16:58:19,930 P7047 INFO [Metrics] logloss: 0.371901 - AUC: 0.792990
-
-```
+## DeepFM_avazu_x4_001
+
+A hands-on guide to run the DeepFM model on the Avazu_x4_001 dataset.
+
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
+
+### Index
+[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
+
+### Environments
++ Hardware
+
+ ```python
+ CPU: Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.60GHz
+ GPU: Tesla P100 16G
+ RAM: 755G
+
+ ```
+
++ Software
+
+ ```python
+ CUDA: 10.0
+ python: 3.6.5
+ pytorch: 1.0.1.post2
+ pandas: 0.23.0
+ numpy: 1.18.1
+ scipy: 1.1.0
+ sklearn: 0.23.1
+ pyyaml: 5.1
+ h5py: 2.7.1
+ tqdm: 4.59.0
+ fuxictr: 1.0.2
+ ```
+
+### Dataset
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
+
+### Code
+
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DeepFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DeepFM.py).
+
+Running steps:
+
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+
+ ```python
+ sys.path.append('YOUR_PATH_TO_FuxiCTR/')
+ ```
+
+2. Create a data directory and put the downloaded csv files in `../data/Avazu/Avazu_x4`.
+
+3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [DeepFM_avazu_x4_tuner_config_01](./DeepFM_avazu_x4_tuner_config_01). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
+
+4. Run the following script to start.
+
+ ```bash
+ cd DeepFM_avazu_x4_001
+ nohup python run_expid.py --config ./DeepFM_avazu_x4_tuner_config_01 --expid DeepFM_avazu_x4_019_9e93795e --gpu 0 > run.log &
+ tail -f run.log
+ ```
+
+### Results
+
+| logloss | AUC |
+|:--------------------:|:--------------------:|
+| 0.371901 | 0.792990 |
+
+
+### Logs
+```python
+2020-06-12 16:12:51,065 P7047 INFO {
+ "batch_norm": "False",
+ "batch_size": "10000",
+ "data_format": "h5",
+ "data_root": "../data/Avazu/",
+ "dataset_id": "avazu_x4_3bbbc4c9",
+ "debug": "False",
+ "embedding_dim": "16",
+ "embedding_dropout": "0",
+ "embedding_regularizer": "0",
+ "epochs": "100",
+ "every_x_epochs": "1",
+ "gpu": "0",
+ "hidden_activations": "relu",
+ "hidden_units": "[2000, 2000, 2000, 2000]",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "metrics": "['logloss', 'AUC']",
+ "model": "DeepFM",
+ "model_id": "DeepFM_avazu_x4_3bbbc4c9_019_72fc2cd3",
+ "model_root": "./Avazu/DeepFM_avazu/min2/",
+ "monitor": "{'AUC': 1, 'logloss': -1}",
+ "monitor_mode": "max",
+ "net_dropout": "0",
+ "net_regularizer": "0",
+ "optimizer": "adam",
+ "patience": "2",
+ "pickle_feature_encoder": "True",
+ "save_best_only": "True",
+ "seed": "2019",
+ "shuffle": "True",
+ "task": "binary_classification",
+ "test_data": "../data/Avazu/avazu_x4_3bbbc4c9/test.h5",
+ "train_data": "../data/Avazu/avazu_x4_3bbbc4c9/train.h5",
+ "use_hdf5": "True",
+ "valid_data": "../data/Avazu/avazu_x4_3bbbc4c9/valid.h5",
+ "verbose": "1",
+ "version": "pytorch",
+ "workers": "3"
+}
+2020-06-12 16:12:51,065 P7047 INFO Set up feature encoder...
+2020-06-12 16:12:51,066 P7047 INFO Load feature_map from json: ../data/Avazu/avazu_x4_3bbbc4c9/feature_map.json
+2020-06-12 16:12:52,773 P7047 INFO Total number of parameters: 76544576.
+2020-06-12 16:12:52,773 P7047 INFO Loading data...
+2020-06-12 16:12:52,775 P7047 INFO Loading data from h5: ../data/Avazu/avazu_x4_3bbbc4c9/train.h5
+2020-06-12 16:12:55,644 P7047 INFO Loading data from h5: ../data/Avazu/avazu_x4_3bbbc4c9/valid.h5
+2020-06-12 16:12:56,884 P7047 INFO Train samples: total/32343172, pos/5492052, neg/26851120, ratio/16.98%
+2020-06-12 16:12:56,993 P7047 INFO Validation samples: total/4042897, pos/686507, neg/3356390, ratio/16.98%
+2020-06-12 16:12:56,993 P7047 INFO Loading train data done.
+2020-06-12 16:12:59,774 P7047 INFO Start training: 3235 batches/epoch
+2020-06-12 16:12:59,774 P7047 INFO ************ Epoch=1 start ************
+2020-06-12 16:27:51,766 P7047 INFO [Metrics] logloss: 0.372035 - AUC: 0.792794
+2020-06-12 16:27:51,767 P7047 INFO Save best model: monitor(max): 0.420759
+2020-06-12 16:27:52,043 P7047 INFO --- 3235/3235 batches finished ---
+2020-06-12 16:27:52,088 P7047 INFO Train loss: 0.380125
+2020-06-12 16:27:52,089 P7047 INFO ************ Epoch=1 end ************
+2020-06-12 16:42:42,369 P7047 INFO [Metrics] logloss: 0.380053 - AUC: 0.789567
+2020-06-12 16:42:42,373 P7047 INFO Monitor(max) STOP: 0.409514 !
+2020-06-12 16:42:42,373 P7047 INFO Reduce learning rate on plateau: 0.000100
+2020-06-12 16:42:42,373 P7047 INFO --- 3235/3235 batches finished ---
+2020-06-12 16:42:42,423 P7047 INFO Train loss: 0.332604
+2020-06-12 16:42:42,423 P7047 INFO ************ Epoch=2 end ************
+2020-06-12 16:57:32,253 P7047 INFO [Metrics] logloss: 0.426078 - AUC: 0.776283
+2020-06-12 16:57:32,256 P7047 INFO Monitor(max) STOP: 0.350205 !
+2020-06-12 16:57:32,256 P7047 INFO Reduce learning rate on plateau: 0.000010
+2020-06-12 16:57:32,256 P7047 INFO Early stopping at epoch=3
+2020-06-12 16:57:32,256 P7047 INFO --- 3235/3235 batches finished ---
+2020-06-12 16:57:32,303 P7047 INFO Train loss: 0.290542
+2020-06-12 16:57:32,303 P7047 INFO Training finished.
+2020-06-12 16:57:32,303 P7047 INFO Load best model: /home/XXX/benchmarks/Avazu/DeepFM_avazu/min2/avazu_x4_3bbbc4c9/DeepFM_avazu_x4_3bbbc4c9_019_72fc2cd3_model.ckpt
+2020-06-12 16:57:32,684 P7047 INFO ****** Train/validation evaluation ******
+2020-06-12 16:57:55,907 P7047 INFO [Metrics] logloss: 0.372035 - AUC: 0.792794
+2020-06-12 16:57:55,955 P7047 INFO ******** Test evaluation ********
+2020-06-12 16:57:55,955 P7047 INFO Loading data...
+2020-06-12 16:57:55,955 P7047 INFO Loading data from h5: ../data/Avazu/avazu_x4_3bbbc4c9/test.h5
+2020-06-12 16:57:56,471 P7047 INFO Test samples: total/4042898, pos/686507, neg/3356391, ratio/16.98%
+2020-06-12 16:57:56,471 P7047 INFO Loading test data done.
+2020-06-12 16:58:19,930 P7047 INFO [Metrics] logloss: 0.371901 - AUC: 0.792990
+
+```
diff --git a/ranking/ctr/DeepFM/DeepFM_avazu_x4_002/README.md b/ranking/ctr/DeepFM/DeepFM_avazu_x4_002/README.md
index 1e4b2ead..b54e798d 100644
--- a/ranking/ctr/DeepFM/DeepFM_avazu_x4_002/README.md
+++ b/ranking/ctr/DeepFM/DeepFM_avazu_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DeepFM model on the Avazu_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DeepFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DeepFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DeepFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DeepFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DeepFM/DeepFM_criteo_x1/README.md b/ranking/ctr/DeepFM/DeepFM_criteo_x1/README.md
index 77f96eec..22f12127 100644
--- a/ranking/ctr/DeepFM/DeepFM_criteo_x1/README.md
+++ b/ranking/ctr/DeepFM/DeepFM_criteo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DeepFM model on the Criteo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x1](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DeepFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DeepFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DeepFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DeepFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DeepFM/DeepFM_criteo_x4_001/README.md b/ranking/ctr/DeepFM/DeepFM_criteo_x4_001/README.md
index 9bb1a1dc..d2fcd733 100644
--- a/ranking/ctr/DeepFM/DeepFM_criteo_x4_001/README.md
+++ b/ranking/ctr/DeepFM/DeepFM_criteo_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DeepFM model on the Criteo_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DeepFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DeepFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DeepFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DeepFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DeepFM/DeepFM_criteo_x4_002/README.md b/ranking/ctr/DeepFM/DeepFM_criteo_x4_002/README.md
index f66d4474..3f14a016 100644
--- a/ranking/ctr/DeepFM/DeepFM_criteo_x4_002/README.md
+++ b/ranking/ctr/DeepFM/DeepFM_criteo_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DeepFM model on the Criteo_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DeepFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DeepFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DeepFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DeepFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DeepFM/DeepFM_frappe_x1/README.md b/ranking/ctr/DeepFM/DeepFM_frappe_x1/README.md
index 8db6a4dd..9044539f 100644
--- a/ranking/ctr/DeepFM/DeepFM_frappe_x1/README.md
+++ b/ranking/ctr/DeepFM/DeepFM_frappe_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DeepFM model on the Frappe_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
@@ -36,15 +36,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
### Dataset
-Dataset ID: [Frappe_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Frappe/README.md#Frappe_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Frappe_x1](https://github.com/reczoo/Datasets/tree/main/Frappe/Frappe_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DeepFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DeepFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DeepFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DeepFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DeepFM/DeepFM_kkbox_x1/README.md b/ranking/ctr/DeepFM/DeepFM_kkbox_x1/README.md
index 0b9453fb..f4f82d68 100644
--- a/ranking/ctr/DeepFM/DeepFM_kkbox_x1/README.md
+++ b/ranking/ctr/DeepFM/DeepFM_kkbox_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DeepFM model on the KKBox_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [KKBox_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/KKBox/README.md#KKBox_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [KKBox_x1](https://github.com/reczoo/Datasets/tree/main/KKBox/KKBox_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DeepFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DeepFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [DeepFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/DeepFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DeepFM/DeepFM_kuaivideo_x1/README.md b/ranking/ctr/DeepFM/DeepFM_kuaivideo_x1/README.md
index 87ff65a3..a7760e30 100644
--- a/ranking/ctr/DeepFM/DeepFM_kuaivideo_x1/README.md
+++ b/ranking/ctr/DeepFM/DeepFM_kuaivideo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DeepFM model on the KuaiVideo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [KuaiVideo_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/KuaiShou#KuaiVideo_x1) to get data ready.
+Please refer to [KuaiVideo_x1](https://github.com/reczoo/Datasets/tree/main/KuaiShou/KuaiVideo_x1) to get the dataset details.
### Code
-We use the [DeepFM](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/DeepFM) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [DeepFM](https://github.com/reczoo/FuxiCTR/tree/v2.0.1/model_zoo/DeepFM) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/DeepFM/DeepFM_microvideo1.7m_x1/README.md b/ranking/ctr/DeepFM/DeepFM_microvideo1.7m_x1/README.md
index 9cf9e3ee..d3313354 100644
--- a/ranking/ctr/DeepFM/DeepFM_microvideo1.7m_x1/README.md
+++ b/ranking/ctr/DeepFM/DeepFM_microvideo1.7m_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DeepFM model on the MicroVideo1.7M_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [MicroVideo1.7M_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/MicroVideo1.7M#MicroVideo17M_x1) to get data ready.
+Please refer to [MicroVideo1.7M_x1](https://github.com/reczoo/Datasets/tree/main/MicroVideo/MicroVideo1.7M_x1) to get the dataset details.
### Code
-We use the [DeepFM](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/DeepFM) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [DeepFM](https://github.com/reczoo/FuxiCTR/tree/v2.0.1/model_zoo/DeepFM) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/DeepFM/DeepFM_movielenslatest_x1/README.md b/ranking/ctr/DeepFM/DeepFM_movielenslatest_x1/README.md
index 833025cf..bad385ce 100644
--- a/ranking/ctr/DeepFM/DeepFM_movielenslatest_x1/README.md
+++ b/ranking/ctr/DeepFM/DeepFM_movielenslatest_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DeepFM model on the Movielenslatest_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
@@ -36,15 +36,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
### Dataset
-Dataset ID: [Movielenslatest_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/MovieLens/README.md#Movielenslatest_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [MovielensLatest_x1](https://github.com/reczoo/Datasets/tree/main/MovieLens/MovielensLatest_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DeepFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DeepFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DeepFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DeepFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DeepFM/DeepFM_taobaoad_x1/README.md b/ranking/ctr/DeepFM/DeepFM_taobaoad_x1/README.md
index bc1b0e73..13728c10 100644
--- a/ranking/ctr/DeepFM/DeepFM_taobaoad_x1/README.md
+++ b/ranking/ctr/DeepFM/DeepFM_taobaoad_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DeepFM model on the TaobaoAd_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [TaobaoAd_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/Taobao#TaobaoAd_x1) to get data ready.
+Please refer to [TaobaoAd_x1](https://github.com/reczoo/Datasets/tree/main/Taobao/TaobaoAd_x1) to get the dataset details.
### Code
-We use the [DeepFM](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/DeepFM) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [DeepFM](https://github.com/reczoo/FuxiCTR/tree/v2.0.1/model_zoo/DeepFM) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/DeepIM/DeepIM_avazu_x1/README.md b/ranking/ctr/DeepIM/DeepIM_avazu_x1/README.md
index aa96dfd0..e8c62b6d 100644
--- a/ranking/ctr/DeepIM/DeepIM_avazu_x1/README.md
+++ b/ranking/ctr/DeepIM/DeepIM_avazu_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DeepIM model on the Avazu_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x1](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DeepIM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DeepIM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DeepIM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DeepIM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DeepIM/DeepIM_criteo_x1/README.md b/ranking/ctr/DeepIM/DeepIM_criteo_x1/README.md
index 4b07e238..880feca0 100644
--- a/ranking/ctr/DeepIM/DeepIM_criteo_x1/README.md
+++ b/ranking/ctr/DeepIM/DeepIM_criteo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DeepIM model on the Criteo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x1](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DeepIM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DeepIM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DeepIM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DeepIM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DeepIM/DeepIM_frappe_x1/README.md b/ranking/ctr/DeepIM/DeepIM_frappe_x1/README.md
index 25a74738..0913598e 100644
--- a/ranking/ctr/DeepIM/DeepIM_frappe_x1/README.md
+++ b/ranking/ctr/DeepIM/DeepIM_frappe_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DeepIM model on the Frappe_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
@@ -36,15 +36,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
### Dataset
-Dataset ID: [Frappe_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Frappe/README.md#Frappe_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Frappe_x1](https://github.com/reczoo/Datasets/tree/main/Frappe/Frappe_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DeepIM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DeepIM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DeepIM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DeepIM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/DeepIM/DeepIM_movielenslatest_x1/README.md b/ranking/ctr/DeepIM/DeepIM_movielenslatest_x1/README.md
index 92980881..6eabe270 100644
--- a/ranking/ctr/DeepIM/DeepIM_movielenslatest_x1/README.md
+++ b/ranking/ctr/DeepIM/DeepIM_movielenslatest_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the DeepIM model on the Movielenslatest_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
@@ -36,15 +36,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
### Dataset
-Dataset ID: [Movielenslatest_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/MovieLens/README.md#Movielenslatest_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [MovielensLatest_x1](https://github.com/reczoo/Datasets/tree/main/MovieLens/MovielensLatest_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DeepIM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DeepIM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [DeepIM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/DeepIM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/EDCN/EDCN_avazu_x1/README.md b/ranking/ctr/EDCN/EDCN_avazu_x1/README.md
index 8cea1de7..62e6663d 100644
--- a/ranking/ctr/EDCN/EDCN_avazu_x1/README.md
+++ b/ranking/ctr/EDCN/EDCN_avazu_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the EDCN model on the Avazu_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
@@ -40,11 +40,11 @@ Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pre
### Code
-We use [FuxiCTR-v1.2.2](https://github.com/xue-pai/FuxiCTR/tree/v1.2.2) for this experiment. See the model code: [EDCN](https://github.com/xue-pai/FuxiCTR/blob/v1.2.2/fuxictr/pytorch/models/EDCN.py).
+We use [FuxiCTR-v1.2.2](https://github.com/reczoo/FuxiCTR/tree/v1.2.2) for this experiment. See the model code: [EDCN](https://github.com/reczoo/FuxiCTR/blob/v1.2.2/fuxictr/pytorch/models/EDCN.py).
Running steps:
-1. Download [FuxiCTR-v1.2.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [fuxictr_version.py](./fuxictr_version.py#L3) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.2.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [fuxictr_version.py](./fuxictr_version.py#L3) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
@@ -185,4 +185,4 @@ Running steps:
### Revision History
-- [Version 1](https://github.com/openbenchmark/BARS/tree/88d3a0faa4565e975141ae89a52d35d3a8b56eda/ctr_prediction/benchmarks/EDCN/EDCN_avazu_x1): deprecated due to bug fix [#29](https://github.com/xue-pai/FuxiCTR/issues/29) of FuxiCTR.
+- [Version 1](https://github.com/openbenchmark/BARS/tree/88d3a0faa4565e975141ae89a52d35d3a8b56eda/ctr_prediction/benchmarks/EDCN/EDCN_avazu_x1): deprecated due to bug fix [#29](https://github.com/reczoo/FuxiCTR/issues/29) of FuxiCTR.
diff --git a/ranking/ctr/EDCN/EDCN_criteo_x1/README.md b/ranking/ctr/EDCN/EDCN_criteo_x1/README.md
index f0d8e19b..2397b8dd 100644
--- a/ranking/ctr/EDCN/EDCN_criteo_x1/README.md
+++ b/ranking/ctr/EDCN/EDCN_criteo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the EDCN model on the Criteo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
@@ -40,11 +40,11 @@ Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pr
### Code
-We use [FuxiCTR-v1.2.2](https://github.com/xue-pai/FuxiCTR/tree/v1.2.2) for this experiment. See the model code: [EDCN](https://github.com/xue-pai/FuxiCTR/blob/v1.2.2/fuxictr/pytorch/models/EDCN.py).
+We use [FuxiCTR-v1.2.2](https://github.com/reczoo/FuxiCTR/tree/v1.2.2) for this experiment. See the model code: [EDCN](https://github.com/reczoo/FuxiCTR/blob/v1.2.2/fuxictr/pytorch/models/EDCN.py).
Running steps:
-1. Download [FuxiCTR-v1.2.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [fuxictr_version.py](./fuxictr_version.py#L3) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.2.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [fuxictr_version.py](./fuxictr_version.py#L3) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
@@ -224,4 +224,4 @@ Running steps:
### Revision History
-- [Version 1](https://github.com/openbenchmark/BARS/tree/88d3a0faa4565e975141ae89a52d35d3a8b56eda/ctr_prediction/benchmarks/EDCN/EDCN_criteo_x1): deprecated due to bug fix [#29](https://github.com/xue-pai/FuxiCTR/issues/29) of FuxiCTR.
+- [Version 1](https://github.com/openbenchmark/BARS/tree/88d3a0faa4565e975141ae89a52d35d3a8b56eda/ctr_prediction/benchmarks/EDCN/EDCN_criteo_x1): deprecated due to bug fix [#29](https://github.com/reczoo/FuxiCTR/issues/29) of FuxiCTR.
diff --git a/ranking/ctr/EDCN/EDCN_frappe_x1/README.md b/ranking/ctr/EDCN/EDCN_frappe_x1/README.md
index 75964f35..d3189127 100644
--- a/ranking/ctr/EDCN/EDCN_frappe_x1/README.md
+++ b/ranking/ctr/EDCN/EDCN_frappe_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the EDCN model on the Frappe_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
@@ -40,11 +40,11 @@ Dataset ID: [Frappe_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pr
### Code
-We use [FuxiCTR-v1.2.2](https://github.com/xue-pai/FuxiCTR/tree/v1.2.2) for this experiment. See the model code: [EDCN](https://github.com/xue-pai/FuxiCTR/blob/v1.2.2/fuxictr/pytorch/models/EDCN.py).
+We use [FuxiCTR-v1.2.2](https://github.com/reczoo/FuxiCTR/tree/v1.2.2) for this experiment. See the model code: [EDCN](https://github.com/reczoo/FuxiCTR/blob/v1.2.2/fuxictr/pytorch/models/EDCN.py).
Running steps:
-1. Download [FuxiCTR-v1.2.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [fuxictr_version.py](./fuxictr_version.py#L3) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.2.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [fuxictr_version.py](./fuxictr_version.py#L3) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
@@ -299,4 +299,4 @@ Running steps:
### Revision History
-- [Version 1](https://github.com/openbenchmark/BARS/tree/88d3a0faa4565e975141ae89a52d35d3a8b56eda/ctr_prediction/benchmarks/EDCN/EDCN_frappe_x1): deprecated due to bug fix [#29](https://github.com/xue-pai/FuxiCTR/issues/29) of FuxiCTR.
+- [Version 1](https://github.com/openbenchmark/BARS/tree/88d3a0faa4565e975141ae89a52d35d3a8b56eda/ctr_prediction/benchmarks/EDCN/EDCN_frappe_x1): deprecated due to bug fix [#29](https://github.com/reczoo/FuxiCTR/issues/29) of FuxiCTR.
diff --git a/ranking/ctr/EDCN/EDCN_movielenslatest_x1/README.md b/ranking/ctr/EDCN/EDCN_movielenslatest_x1/README.md
index f2ec3998..dadf122a 100644
--- a/ranking/ctr/EDCN/EDCN_movielenslatest_x1/README.md
+++ b/ranking/ctr/EDCN/EDCN_movielenslatest_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the EDCN model on the MovielensLatest_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
@@ -40,11 +40,11 @@ Dataset ID: [MovielensLatest_x1](https://github.com/openbenchmark/BARS/blob/mast
### Code
-We use [FuxiCTR-v1.2.2](https://github.com/xue-pai/FuxiCTR/tree/v1.2.2) for this experiment. See the model code: [EDCN](https://github.com/xue-pai/FuxiCTR/blob/v1.2.2/fuxictr/pytorch/models/EDCN.py).
+We use [FuxiCTR-v1.2.2](https://github.com/reczoo/FuxiCTR/tree/v1.2.2) for this experiment. See the model code: [EDCN](https://github.com/reczoo/FuxiCTR/blob/v1.2.2/fuxictr/pytorch/models/EDCN.py).
Running steps:
-1. Download [FuxiCTR-v1.2.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [fuxictr_version.py](./fuxictr_version.py#L3) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.2.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [fuxictr_version.py](./fuxictr_version.py#L3) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
@@ -249,4 +249,4 @@ Running steps:
### Revision History
-- [Version 1](https://github.com/openbenchmark/BARS/tree/88d3a0faa4565e975141ae89a52d35d3a8b56eda/ctr_prediction/benchmarks/EDCN/EDCN_movielenslatest_x1): deprecated due to bug fix [#29](https://github.com/xue-pai/FuxiCTR/issues/29) of FuxiCTR.
+- [Version 1](https://github.com/openbenchmark/BARS/tree/88d3a0faa4565e975141ae89a52d35d3a8b56eda/ctr_prediction/benchmarks/EDCN/EDCN_movielenslatest_x1): deprecated due to bug fix [#29](https://github.com/reczoo/FuxiCTR/issues/29) of FuxiCTR.
diff --git a/ranking/ctr/FFM/FFM_avazu_x1/README.md b/ranking/ctr/FFM/FFM_avazu_x1/README.md
index 2c9e4504..20fe614f 100644
--- a/ranking/ctr/FFM/FFM_avazu_x1/README.md
+++ b/ranking/ctr/FFM/FFM_avazu_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FFM model on the Avazu_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x1](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FFM/FFM_avazu_x4_001/README.md b/ranking/ctr/FFM/FFM_avazu_x4_001/README.md
index 3b787784..1aab3165 100644
--- a/ranking/ctr/FFM/FFM_avazu_x4_001/README.md
+++ b/ranking/ctr/FFM/FFM_avazu_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FFM model on the Avazu_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FFM/FFM_avazu_x4_002/README.md b/ranking/ctr/FFM/FFM_avazu_x4_002/README.md
index 769cf66a..0162b654 100644
--- a/ranking/ctr/FFM/FFM_avazu_x4_002/README.md
+++ b/ranking/ctr/FFM/FFM_avazu_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FFM model on the Avazu_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FFM/FFM_criteo_x1/README.md b/ranking/ctr/FFM/FFM_criteo_x1/README.md
index c717f38a..64f08b3e 100644
--- a/ranking/ctr/FFM/FFM_criteo_x1/README.md
+++ b/ranking/ctr/FFM/FFM_criteo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FFM model on the Criteo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x1](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FFM/FFM_criteo_x4_001/README.md b/ranking/ctr/FFM/FFM_criteo_x4_001/README.md
index 3da09abc..8a94efb4 100644
--- a/ranking/ctr/FFM/FFM_criteo_x4_001/README.md
+++ b/ranking/ctr/FFM/FFM_criteo_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FFM model on the Criteo_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FFM/FFM_criteo_x4_002/README.md b/ranking/ctr/FFM/FFM_criteo_x4_002/README.md
index 6ca43966..c3c1e834 100644
--- a/ranking/ctr/FFM/FFM_criteo_x4_002/README.md
+++ b/ranking/ctr/FFM/FFM_criteo_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FFM model on the Criteo_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FFM/FFM_frappe_x1/README.md b/ranking/ctr/FFM/FFM_frappe_x1/README.md
index 73e05bf6..c90b7f3e 100644
--- a/ranking/ctr/FFM/FFM_frappe_x1/README.md
+++ b/ranking/ctr/FFM/FFM_frappe_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FFM model on the Frappe_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Frappe_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Frappe/README.md#Frappe_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Frappe_x1](https://github.com/reczoo/Datasets/tree/main/Frappe/Frappe_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FFM/FFM_kkbox_x1/README.md b/ranking/ctr/FFM/FFM_kkbox_x1/README.md
index f954560b..1bf6bdb8 100644
--- a/ranking/ctr/FFM/FFM_kkbox_x1/README.md
+++ b/ranking/ctr/FFM/FFM_kkbox_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FFM model on the KKBox_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -38,11 +38,11 @@ Dataset ID: [KKBox_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pre
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FFM/FFM_movielenslatest_x1/README.md b/ranking/ctr/FFM/FFM_movielenslatest_x1/README.md
index 37698559..5db60374 100644
--- a/ranking/ctr/FFM/FFM_movielenslatest_x1/README.md
+++ b/ranking/ctr/FFM/FFM_movielenslatest_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FFM model on the Movielenslatest_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Movielenslatest_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/MovieLens/README.md#Movielenslatest_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [MovielensLatest_x1](https://github.com/reczoo/Datasets/tree/main/MovieLens/MovielensLatest_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FGCNN/FGCNN_avazu_x4_001/README.md b/ranking/ctr/FGCNN/FGCNN_avazu_x4_001/README.md
index 6d194071..b3a58778 100644
--- a/ranking/ctr/FGCNN/FGCNN_avazu_x4_001/README.md
+++ b/ranking/ctr/FGCNN/FGCNN_avazu_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FGCNN model on the Avazu_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FGCNN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FGCNN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FGCNN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FGCNN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FGCNN/FGCNN_avazu_x4_002/README.md b/ranking/ctr/FGCNN/FGCNN_avazu_x4_002/README.md
index a597d78e..3ef0d606 100644
--- a/ranking/ctr/FGCNN/FGCNN_avazu_x4_002/README.md
+++ b/ranking/ctr/FGCNN/FGCNN_avazu_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FGCNN model on the Avazu_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FGCNN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FGCNN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FGCNN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FGCNN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FGCNN/FGCNN_criteo_x4_001/README.md b/ranking/ctr/FGCNN/FGCNN_criteo_x4_001/README.md
index 421d58e3..61e83744 100644
--- a/ranking/ctr/FGCNN/FGCNN_criteo_x4_001/README.md
+++ b/ranking/ctr/FGCNN/FGCNN_criteo_x4_001/README.md
@@ -1,192 +1,192 @@
-## FGCNN_Criteo_x4_001
-
-A notebook to benchmark FGCNN on Criteo_x4_001 dataset.
-
-Author: [XUEPAI Team](https://github.com/xue-pai)
-
-
-### Index
-[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
-
-### Environments
-+ Hardware
-
- ```python
- CPU: Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.6GHz
- RAM: 500G+
- ```
-+ Software
-
- ```python
- python: 3.6.5
- pandas: 1.0.0
- numpy: 1.18.1
- ```
-
-### Dataset
-In this setting, we preprocess the data split by removing the id field that is useless for CTR prediction. In addition, we transform the timestamp field into three fields: hour, weekday, and is_weekend. For all categorical fields, we filter infrequent features by setting the threshold min_category_count=2 (performs well) and replace them with a default token. Note that we do not follow the exact preprocessing steps in AutoInt, because the authors neither remove the useless id field nor specially preprocess the timestamp field.
-
-To make a fair comparison, we fix embedding_dim=16 as with AutoInt.
-
-
-### Code
-1. Install FuxiCTR
-
- Install FuxiCTR via `pip install fuxictr==1.0` to get all dependencies ready. Then download [the FuxiCTR repository](https://github.com/huawei-noah/benchmark/archive/53e314461c19dbc7f462b42bf0f0bfae020dc398.zip) to your local path.
-
-2. Downalod the dataset and run [the preprocessing script](https://github.com/xue-pai/Open-CTR-Benchmark/blob/master/datasets/Criteo/Criteo_x4/split_criteo_x4.py) for data splitting.
-
-3. Download the hyper-parameter configuration file: [FGCNN_criteo_x4_tuner_config_01.yaml](./FGCNN_criteo_x4_tuner_config_01.yaml)
-
-4. Run the following script to reproduce the result.
- + --config: The config file that defines the tuning space
- + --tag: Specify which expid to run (each expid corresponds to a specific setting of hyper-parameters in the tunner space)
- + --gpu: The available gpus for parameters tuning.
-
- ```bash
- cd FuxiCTR/benchmarks
- python run_param_tuner.py --config YOUR_PATH/FGCNN_criteo_x4_tuner_config_01.yaml --tag 001 --gpu 0
- ```
-
-### Results
-```python
-[Metrics] logloss: 0.439800 - AUC: 0.812061
-```
-
-
-### Logs
-```python
-2020-06-29 12:02:11,762 P585 INFO {
- "batch_size": "5000",
- "channels": "[38, 40, 42, 44]",
- "conv_activation": "Tanh",
- "conv_batch_norm": "True",
- "data_format": "h5",
- "data_root": "../data/Criteo/",
- "dataset_id": "criteo_x4_5c863b0f",
- "debug": "False",
- "dnn_activations": "ReLU",
- "dnn_batch_norm": "False",
- "dnn_hidden_units": "[1000, 1000, 1000]",
- "embedding_dim": "16",
- "embedding_dropout": "0",
- "embedding_regularizer": "1e-05",
- "epochs": "100",
- "every_x_epochs": "1",
- "gpu": "0",
- "kernel_heights": "9",
- "learning_rate": "0.001",
- "loss": "binary_crossentropy",
- "metrics": "['logloss', 'AUC']",
- "model": "FGCNN",
- "model_id": "FGCNN_criteo_x4_5c863b0f_001_aa4d4a89",
- "model_root": "./Criteo/FGCNN_criteo/min10/",
- "monitor": "{'AUC': 1, 'logloss': -1}",
- "monitor_mode": "max",
- "net_dropout": "0",
- "net_regularizer": "0",
- "optimizer": "adam",
- "patience": "2",
- "pickle_feature_encoder": "True",
- "pooling_sizes": "2",
- "recombined_channels": "3",
- "save_best_only": "True",
- "seed": "2019",
- "share_embedding": "False",
- "shuffle": "True",
- "task": "binary_classification",
- "test_data": "../data/Criteo/criteo_x4_5c863b0f/test.h5",
- "train_data": "../data/Criteo/criteo_x4_5c863b0f/train.h5",
- "use_hdf5": "True",
- "valid_data": "../data/Criteo/criteo_x4_5c863b0f/valid.h5",
- "verbose": "0",
- "version": "pytorch",
- "workers": "3"
-}
-2020-06-29 12:02:11,764 P585 INFO Set up feature encoder...
-2020-06-29 12:02:11,764 P585 INFO Load feature_map from json: ../data/Criteo/criteo_x4_5c863b0f/feature_map.json
-2020-06-29 12:02:11,764 P585 INFO Loading data...
-2020-06-29 12:02:11,770 P585 INFO Loading data from h5: ../data/Criteo/criteo_x4_5c863b0f/train.h5
-2020-06-29 12:02:18,861 P585 INFO Loading data from h5: ../data/Criteo/criteo_x4_5c863b0f/valid.h5
-2020-06-29 12:02:21,631 P585 INFO Train samples: total/36672493, pos/9396350, neg/27276143, ratio/25.62%
-2020-06-29 12:02:21,845 P585 INFO Validation samples: total/4584062, pos/1174544, neg/3409518, ratio/25.62%
-2020-06-29 12:02:21,845 P585 INFO Loading train data done.
-2020-06-29 12:04:01,673 P585 INFO Start training: 7335 batches/epoch
-2020-06-29 12:04:01,673 P585 INFO ************ Epoch=1 start ************
-2020-06-29 12:47:57,069 P585 INFO [Metrics] logloss: 0.448108 - AUC: 0.803004
-2020-06-29 12:47:57,071 P585 INFO Save best model: monitor(max): 0.354895
-2020-06-29 12:47:57,736 P585 INFO --- 7335/7335 batches finished ---
-2020-06-29 12:47:57,827 P585 INFO Train loss: 0.467665
-2020-06-29 12:47:57,828 P585 INFO ************ Epoch=1 end ************
-2020-06-29 13:28:03,687 P585 INFO [Metrics] logloss: 0.445829 - AUC: 0.805674
-2020-06-29 13:28:03,689 P585 INFO Save best model: monitor(max): 0.359845
-2020-06-29 13:28:04,155 P585 INFO --- 7335/7335 batches finished ---
-2020-06-29 13:28:04,215 P585 INFO Train loss: 0.459341
-2020-06-29 13:28:04,215 P585 INFO ************ Epoch=2 end ************
-2020-06-29 14:08:13,687 P585 INFO [Metrics] logloss: 0.446285 - AUC: 0.806552
-2020-06-29 14:08:13,690 P585 INFO Save best model: monitor(max): 0.360267
-2020-06-29 14:08:14,089 P585 INFO --- 7335/7335 batches finished ---
-2020-06-29 14:08:14,148 P585 INFO Train loss: 0.458019
-2020-06-29 14:08:14,148 P585 INFO ************ Epoch=3 end ************
-2020-06-29 14:48:15,703 P585 INFO [Metrics] logloss: 0.444209 - AUC: 0.807111
-2020-06-29 14:48:15,704 P585 INFO Save best model: monitor(max): 0.362902
-2020-06-29 14:48:16,133 P585 INFO --- 7335/7335 batches finished ---
-2020-06-29 14:48:16,201 P585 INFO Train loss: 0.457285
-2020-06-29 14:48:16,202 P585 INFO ************ Epoch=4 end ************
-2020-06-29 15:29:16,387 P585 INFO [Metrics] logloss: 0.443927 - AUC: 0.807468
-2020-06-29 15:29:16,389 P585 INFO Save best model: monitor(max): 0.363542
-2020-06-29 15:29:16,847 P585 INFO --- 7335/7335 batches finished ---
-2020-06-29 15:29:16,922 P585 INFO Train loss: 0.456642
-2020-06-29 15:29:16,922 P585 INFO ************ Epoch=5 end ************
-2020-06-29 16:09:23,828 P585 INFO [Metrics] logloss: 0.443501 - AUC: 0.807929
-2020-06-29 16:09:23,830 P585 INFO Save best model: monitor(max): 0.364429
-2020-06-29 16:09:24,282 P585 INFO --- 7335/7335 batches finished ---
-2020-06-29 16:09:24,351 P585 INFO Train loss: 0.456059
-2020-06-29 16:09:24,351 P585 INFO ************ Epoch=6 end ************
-2020-06-29 16:49:58,499 P585 INFO [Metrics] logloss: 0.443385 - AUC: 0.808165
-2020-06-29 16:49:58,502 P585 INFO Save best model: monitor(max): 0.364780
-2020-06-29 16:49:58,963 P585 INFO --- 7335/7335 batches finished ---
-2020-06-29 16:49:59,028 P585 INFO Train loss: 0.455580
-2020-06-29 16:49:59,028 P585 INFO ************ Epoch=7 end ************
-2020-06-29 17:30:17,719 P585 INFO [Metrics] logloss: 0.443072 - AUC: 0.808349
-2020-06-29 17:30:17,722 P585 INFO Save best model: monitor(max): 0.365278
-2020-06-29 17:30:18,126 P585 INFO --- 7335/7335 batches finished ---
-2020-06-29 17:30:18,188 P585 INFO Train loss: 0.455062
-2020-06-29 17:30:18,188 P585 INFO ************ Epoch=8 end ************
-2020-06-29 18:10:46,954 P585 INFO [Metrics] logloss: 0.443226 - AUC: 0.808389
-2020-06-29 18:10:46,956 P585 INFO Monitor(max) STOP: 0.365163 !
-2020-06-29 18:10:46,956 P585 INFO Reduce learning rate on plateau: 0.000100
-2020-06-29 18:10:46,956 P585 INFO --- 7335/7335 batches finished ---
-2020-06-29 18:10:47,013 P585 INFO Train loss: 0.454655
-2020-06-29 18:10:47,014 P585 INFO ************ Epoch=9 end ************
-2020-06-29 18:51:28,063 P585 INFO [Metrics] logloss: 0.440299 - AUC: 0.811523
-2020-06-29 18:51:28,064 P585 INFO Save best model: monitor(max): 0.371223
-2020-06-29 18:51:28,497 P585 INFO --- 7335/7335 batches finished ---
-2020-06-29 18:51:28,579 P585 INFO Train loss: 0.441741
-2020-06-29 18:51:28,579 P585 INFO ************ Epoch=10 end ************
-2020-06-29 19:32:26,740 P585 INFO [Metrics] logloss: 0.440686 - AUC: 0.811480
-2020-06-29 19:32:26,741 P585 INFO Monitor(max) STOP: 0.370794 !
-2020-06-29 19:32:26,741 P585 INFO Reduce learning rate on plateau: 0.000010
-2020-06-29 19:32:26,741 P585 INFO --- 7335/7335 batches finished ---
-2020-06-29 19:32:26,796 P585 INFO Train loss: 0.435093
-2020-06-29 19:32:26,797 P585 INFO ************ Epoch=11 end ************
-2020-06-29 20:13:32,121 P585 INFO [Metrics] logloss: 0.441912 - AUC: 0.810810
-2020-06-29 20:13:32,123 P585 INFO Monitor(max) STOP: 0.368898 !
-2020-06-29 20:13:32,124 P585 INFO Reduce learning rate on plateau: 0.000001
-2020-06-29 20:13:32,128 P585 INFO Early stopping at epoch=12
-2020-06-29 20:13:32,128 P585 INFO --- 7335/7335 batches finished ---
-2020-06-29 20:13:32,193 P585 INFO Train loss: 0.428736
-2020-06-29 20:13:32,194 P585 INFO Training finished.
-2020-06-29 20:13:32,194 P585 INFO Load best model: /cache/xxx/FuxiCTR/benchmarks/Criteo/FGCNN_criteo/min10/criteo_x4_5c863b0f/FGCNN_criteo_x4_5c863b0f_001_aa4d4a89_model.ckpt
-2020-06-29 20:13:32,550 P585 INFO ****** Train/validation evaluation ******
-2020-06-29 20:23:19,424 P585 INFO [Metrics] logloss: 0.426271 - AUC: 0.826100
-2020-06-29 20:24:27,139 P585 INFO [Metrics] logloss: 0.440299 - AUC: 0.811523
-2020-06-29 20:24:27,246 P585 INFO ******** Test evaluation ********
-2020-06-29 20:24:27,246 P585 INFO Loading data...
-2020-06-29 20:24:27,246 P585 INFO Loading data from h5: ../data/Criteo/criteo_x4_5c863b0f/test.h5
-2020-06-29 20:24:28,203 P585 INFO Test samples: total/4584062, pos/1174544, neg/3409518, ratio/25.62%
-2020-06-29 20:24:28,203 P585 INFO Loading test data done.
-2020-06-29 20:25:39,519 P585 INFO [Metrics] logloss: 0.439800 - AUC: 0.812061
-
-
-```
+## FGCNN_Criteo_x4_001
+
+A notebook to benchmark FGCNN on Criteo_x4_001 dataset.
+
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
+
+
+### Index
+[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
+
+### Environments
++ Hardware
+
+ ```python
+ CPU: Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.6GHz
+ RAM: 500G+
+ ```
++ Software
+
+ ```python
+ python: 3.6.5
+ pandas: 1.0.0
+ numpy: 1.18.1
+ ```
+
+### Dataset
+In this setting, we preprocess the data split by removing the id field that is useless for CTR prediction. In addition, we transform the timestamp field into three fields: hour, weekday, and is_weekend. For all categorical fields, we filter infrequent features by setting the threshold min_category_count=2 (performs well) and replace them with a default token. Note that we do not follow the exact preprocessing steps in AutoInt, because the authors neither remove the useless id field nor specially preprocess the timestamp field.
+
+To make a fair comparison, we fix embedding_dim=16 as with AutoInt.
+
+
+### Code
+1. Install FuxiCTR
+
+ Install FuxiCTR via `pip install fuxictr==1.0` to get all dependencies ready. Then download [the FuxiCTR repository](https://github.com/huawei-noah/benchmark/archive/53e314461c19dbc7f462b42bf0f0bfae020dc398.zip) to your local path.
+
+2. Download the dataset and run [the preprocessing script](https://github.com/reczoo/Open-CTR-Benchmark/blob/master/datasets/Criteo/Criteo_x4/split_criteo_x4.py) for data splitting.
+
+3. Download the hyper-parameter configuration file: [FGCNN_criteo_x4_tuner_config_01.yaml](./FGCNN_criteo_x4_tuner_config_01.yaml)
+
+4. Run the following script to reproduce the result.
+ + --config: The config file that defines the tuning space
+    + --tag: Specify which expid to run (each expid corresponds to a specific setting of hyper-parameters in the tuner space)
+    + --gpu: The available gpus for parameter tuning.
+
+ ```bash
+ cd FuxiCTR/benchmarks
+ python run_param_tuner.py --config YOUR_PATH/FGCNN_criteo_x4_tuner_config_01.yaml --tag 001 --gpu 0
+ ```
+
+### Results
+```python
+[Metrics] logloss: 0.439800 - AUC: 0.812061
+```
+
+
+### Logs
+```python
+2020-06-29 12:02:11,762 P585 INFO {
+ "batch_size": "5000",
+ "channels": "[38, 40, 42, 44]",
+ "conv_activation": "Tanh",
+ "conv_batch_norm": "True",
+ "data_format": "h5",
+ "data_root": "../data/Criteo/",
+ "dataset_id": "criteo_x4_5c863b0f",
+ "debug": "False",
+ "dnn_activations": "ReLU",
+ "dnn_batch_norm": "False",
+ "dnn_hidden_units": "[1000, 1000, 1000]",
+ "embedding_dim": "16",
+ "embedding_dropout": "0",
+ "embedding_regularizer": "1e-05",
+ "epochs": "100",
+ "every_x_epochs": "1",
+ "gpu": "0",
+ "kernel_heights": "9",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "metrics": "['logloss', 'AUC']",
+ "model": "FGCNN",
+ "model_id": "FGCNN_criteo_x4_5c863b0f_001_aa4d4a89",
+ "model_root": "./Criteo/FGCNN_criteo/min10/",
+ "monitor": "{'AUC': 1, 'logloss': -1}",
+ "monitor_mode": "max",
+ "net_dropout": "0",
+ "net_regularizer": "0",
+ "optimizer": "adam",
+ "patience": "2",
+ "pickle_feature_encoder": "True",
+ "pooling_sizes": "2",
+ "recombined_channels": "3",
+ "save_best_only": "True",
+ "seed": "2019",
+ "share_embedding": "False",
+ "shuffle": "True",
+ "task": "binary_classification",
+ "test_data": "../data/Criteo/criteo_x4_5c863b0f/test.h5",
+ "train_data": "../data/Criteo/criteo_x4_5c863b0f/train.h5",
+ "use_hdf5": "True",
+ "valid_data": "../data/Criteo/criteo_x4_5c863b0f/valid.h5",
+ "verbose": "0",
+ "version": "pytorch",
+ "workers": "3"
+}
+2020-06-29 12:02:11,764 P585 INFO Set up feature encoder...
+2020-06-29 12:02:11,764 P585 INFO Load feature_map from json: ../data/Criteo/criteo_x4_5c863b0f/feature_map.json
+2020-06-29 12:02:11,764 P585 INFO Loading data...
+2020-06-29 12:02:11,770 P585 INFO Loading data from h5: ../data/Criteo/criteo_x4_5c863b0f/train.h5
+2020-06-29 12:02:18,861 P585 INFO Loading data from h5: ../data/Criteo/criteo_x4_5c863b0f/valid.h5
+2020-06-29 12:02:21,631 P585 INFO Train samples: total/36672493, pos/9396350, neg/27276143, ratio/25.62%
+2020-06-29 12:02:21,845 P585 INFO Validation samples: total/4584062, pos/1174544, neg/3409518, ratio/25.62%
+2020-06-29 12:02:21,845 P585 INFO Loading train data done.
+2020-06-29 12:04:01,673 P585 INFO Start training: 7335 batches/epoch
+2020-06-29 12:04:01,673 P585 INFO ************ Epoch=1 start ************
+2020-06-29 12:47:57,069 P585 INFO [Metrics] logloss: 0.448108 - AUC: 0.803004
+2020-06-29 12:47:57,071 P585 INFO Save best model: monitor(max): 0.354895
+2020-06-29 12:47:57,736 P585 INFO --- 7335/7335 batches finished ---
+2020-06-29 12:47:57,827 P585 INFO Train loss: 0.467665
+2020-06-29 12:47:57,828 P585 INFO ************ Epoch=1 end ************
+2020-06-29 13:28:03,687 P585 INFO [Metrics] logloss: 0.445829 - AUC: 0.805674
+2020-06-29 13:28:03,689 P585 INFO Save best model: monitor(max): 0.359845
+2020-06-29 13:28:04,155 P585 INFO --- 7335/7335 batches finished ---
+2020-06-29 13:28:04,215 P585 INFO Train loss: 0.459341
+2020-06-29 13:28:04,215 P585 INFO ************ Epoch=2 end ************
+2020-06-29 14:08:13,687 P585 INFO [Metrics] logloss: 0.446285 - AUC: 0.806552
+2020-06-29 14:08:13,690 P585 INFO Save best model: monitor(max): 0.360267
+2020-06-29 14:08:14,089 P585 INFO --- 7335/7335 batches finished ---
+2020-06-29 14:08:14,148 P585 INFO Train loss: 0.458019
+2020-06-29 14:08:14,148 P585 INFO ************ Epoch=3 end ************
+2020-06-29 14:48:15,703 P585 INFO [Metrics] logloss: 0.444209 - AUC: 0.807111
+2020-06-29 14:48:15,704 P585 INFO Save best model: monitor(max): 0.362902
+2020-06-29 14:48:16,133 P585 INFO --- 7335/7335 batches finished ---
+2020-06-29 14:48:16,201 P585 INFO Train loss: 0.457285
+2020-06-29 14:48:16,202 P585 INFO ************ Epoch=4 end ************
+2020-06-29 15:29:16,387 P585 INFO [Metrics] logloss: 0.443927 - AUC: 0.807468
+2020-06-29 15:29:16,389 P585 INFO Save best model: monitor(max): 0.363542
+2020-06-29 15:29:16,847 P585 INFO --- 7335/7335 batches finished ---
+2020-06-29 15:29:16,922 P585 INFO Train loss: 0.456642
+2020-06-29 15:29:16,922 P585 INFO ************ Epoch=5 end ************
+2020-06-29 16:09:23,828 P585 INFO [Metrics] logloss: 0.443501 - AUC: 0.807929
+2020-06-29 16:09:23,830 P585 INFO Save best model: monitor(max): 0.364429
+2020-06-29 16:09:24,282 P585 INFO --- 7335/7335 batches finished ---
+2020-06-29 16:09:24,351 P585 INFO Train loss: 0.456059
+2020-06-29 16:09:24,351 P585 INFO ************ Epoch=6 end ************
+2020-06-29 16:49:58,499 P585 INFO [Metrics] logloss: 0.443385 - AUC: 0.808165
+2020-06-29 16:49:58,502 P585 INFO Save best model: monitor(max): 0.364780
+2020-06-29 16:49:58,963 P585 INFO --- 7335/7335 batches finished ---
+2020-06-29 16:49:59,028 P585 INFO Train loss: 0.455580
+2020-06-29 16:49:59,028 P585 INFO ************ Epoch=7 end ************
+2020-06-29 17:30:17,719 P585 INFO [Metrics] logloss: 0.443072 - AUC: 0.808349
+2020-06-29 17:30:17,722 P585 INFO Save best model: monitor(max): 0.365278
+2020-06-29 17:30:18,126 P585 INFO --- 7335/7335 batches finished ---
+2020-06-29 17:30:18,188 P585 INFO Train loss: 0.455062
+2020-06-29 17:30:18,188 P585 INFO ************ Epoch=8 end ************
+2020-06-29 18:10:46,954 P585 INFO [Metrics] logloss: 0.443226 - AUC: 0.808389
+2020-06-29 18:10:46,956 P585 INFO Monitor(max) STOP: 0.365163 !
+2020-06-29 18:10:46,956 P585 INFO Reduce learning rate on plateau: 0.000100
+2020-06-29 18:10:46,956 P585 INFO --- 7335/7335 batches finished ---
+2020-06-29 18:10:47,013 P585 INFO Train loss: 0.454655
+2020-06-29 18:10:47,014 P585 INFO ************ Epoch=9 end ************
+2020-06-29 18:51:28,063 P585 INFO [Metrics] logloss: 0.440299 - AUC: 0.811523
+2020-06-29 18:51:28,064 P585 INFO Save best model: monitor(max): 0.371223
+2020-06-29 18:51:28,497 P585 INFO --- 7335/7335 batches finished ---
+2020-06-29 18:51:28,579 P585 INFO Train loss: 0.441741
+2020-06-29 18:51:28,579 P585 INFO ************ Epoch=10 end ************
+2020-06-29 19:32:26,740 P585 INFO [Metrics] logloss: 0.440686 - AUC: 0.811480
+2020-06-29 19:32:26,741 P585 INFO Monitor(max) STOP: 0.370794 !
+2020-06-29 19:32:26,741 P585 INFO Reduce learning rate on plateau: 0.000010
+2020-06-29 19:32:26,741 P585 INFO --- 7335/7335 batches finished ---
+2020-06-29 19:32:26,796 P585 INFO Train loss: 0.435093
+2020-06-29 19:32:26,797 P585 INFO ************ Epoch=11 end ************
+2020-06-29 20:13:32,121 P585 INFO [Metrics] logloss: 0.441912 - AUC: 0.810810
+2020-06-29 20:13:32,123 P585 INFO Monitor(max) STOP: 0.368898 !
+2020-06-29 20:13:32,124 P585 INFO Reduce learning rate on plateau: 0.000001
+2020-06-29 20:13:32,128 P585 INFO Early stopping at epoch=12
+2020-06-29 20:13:32,128 P585 INFO --- 7335/7335 batches finished ---
+2020-06-29 20:13:32,193 P585 INFO Train loss: 0.428736
+2020-06-29 20:13:32,194 P585 INFO Training finished.
+2020-06-29 20:13:32,194 P585 INFO Load best model: /cache/xxx/FuxiCTR/benchmarks/Criteo/FGCNN_criteo/min10/criteo_x4_5c863b0f/FGCNN_criteo_x4_5c863b0f_001_aa4d4a89_model.ckpt
+2020-06-29 20:13:32,550 P585 INFO ****** Train/validation evaluation ******
+2020-06-29 20:23:19,424 P585 INFO [Metrics] logloss: 0.426271 - AUC: 0.826100
+2020-06-29 20:24:27,139 P585 INFO [Metrics] logloss: 0.440299 - AUC: 0.811523
+2020-06-29 20:24:27,246 P585 INFO ******** Test evaluation ********
+2020-06-29 20:24:27,246 P585 INFO Loading data...
+2020-06-29 20:24:27,246 P585 INFO Loading data from h5: ../data/Criteo/criteo_x4_5c863b0f/test.h5
+2020-06-29 20:24:28,203 P585 INFO Test samples: total/4584062, pos/1174544, neg/3409518, ratio/25.62%
+2020-06-29 20:24:28,203 P585 INFO Loading test data done.
+2020-06-29 20:25:39,519 P585 INFO [Metrics] logloss: 0.439800 - AUC: 0.812061
+
+
+```
diff --git a/ranking/ctr/FGCNN/FGCNN_criteo_x4_002/README.md b/ranking/ctr/FGCNN/FGCNN_criteo_x4_002/README.md
index b1fd8308..978ce244 100644
--- a/ranking/ctr/FGCNN/FGCNN_criteo_x4_002/README.md
+++ b/ranking/ctr/FGCNN/FGCNN_criteo_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FGCNN model on the Criteo_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FGCNN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FGCNN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FGCNN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FGCNN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FGCNN/FGCNN_kkbox_x1/README.md b/ranking/ctr/FGCNN/FGCNN_kkbox_x1/README.md
index bf4671ad..7fcd9746 100644
--- a/ranking/ctr/FGCNN/FGCNN_kkbox_x1/README.md
+++ b/ranking/ctr/FGCNN/FGCNN_kkbox_x1/README.md
@@ -1,257 +1,257 @@
-## FGCNN_kkbox_x1
-
-A hands-on guide to run the FGCNN model on the KKBox_x1 dataset.
-
-Author: [XUEPAI](https://github.com/xue-pai)
-
-### Index
-[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
-
-### Environments
-+ Hardware
-
- ```python
- CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
- GPU: Tesla V100 32G
- RAM: 755G
-
- ```
-
-+ Software
-
- ```python
- CUDA: 10.2
- python: 3.6.4
- pytorch: 1.0.0
- pandas: 0.22.0
- numpy: 1.19.2
- scipy: 1.5.4
- sklearn: 0.22.1
- pyyaml: 5.4.1
- h5py: 2.8.0
- tqdm: 4.60.0
- fuxictr: 1.0.2
- ```
-
-### Dataset
-Dataset ID: [KKBox_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/KKBox#KKBox_x1). Please refer to the dataset details to get data ready.
-
-### Code
-
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FGCNN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FGCNN.py).
-
-Running steps:
-
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
-
- ```python
- sys.path.append('YOUR_PATH_TO_FuxiCTR/')
- ```
-
-2. Create a data directory and put the downloaded csv files in `../data/KKBox/KKBox_x1`.
-
-3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [FGCNN_kkbox_x1_tuner_config_02](./FGCNN_kkbox_x1_tuner_config_02). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
-
-4. Run the following script to start.
-
- ```bash
- cd FGCNN_kkbox_x1
- nohup python run_expid.py --config ./FGCNN_kkbox_x1_tuner_config_02 --expid FGCNN_kkbox_x1_014_48888fb8 --gpu 0 > run.log &
- tail -f run.log
- ```
-
-### Results
-
-| logloss | AUC |
-|:--------------------:|:--------------------:|
-| 0.480056 | 0.852189 |
-
-
-### Logs
-```python
-2022-03-11 09:13:55,483 P27740 INFO {
- "batch_size": "2000",
- "channels": "[14, 16, 18, 20]",
- "conv_activation": "Tanh",
- "conv_batch_norm": "True",
- "data_format": "csv",
- "data_root": "../data/KKBox/",
- "dataset_id": "kkbox_x1_227d337d",
- "debug": "False",
- "dnn_activations": "ReLU",
- "dnn_batch_norm": "False",
- "dnn_hidden_units": "[1000, 1000, 1000]",
- "embedding_dim": "128",
- "embedding_dropout": "0",
- "embedding_regularizer": "0.001",
- "epochs": "100",
- "every_x_epochs": "1",
- "feature_cols": "[{'active': True, 'dtype': 'str', 'name': ['msno', 'song_id', 'source_system_tab', 'source_screen_name', 'source_type', 'city', 'gender', 'registered_via', 'language'], 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'encoder': 'MaskedSumPooling', 'max_len': 3, 'name': 'genre_ids', 'type': 'sequence'}, {'active': True, 'dtype': 'str', 'encoder': 'MaskedSumPooling', 'max_len': 3, 'name': 'artist_name', 'type': 'sequence'}, {'active': True, 'dtype': 'str', 'name': 'isrc', 'preprocess': 'extract_country_code', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': 'bd', 'preprocess': 'bucketize_age', 'type': 'categorical'}]",
- "gpu": "4",
- "kernel_heights": "[7, 7, 7, 7]",
- "label_col": "{'dtype': 'float', 'name': 'label'}",
- "learning_rate": "0.0005",
- "loss": "binary_crossentropy",
- "metrics": "['logloss', 'AUC']",
- "min_categr_count": "10",
- "model": "FGCNN",
- "model_id": "FGCNN_kkbox_x1_014_48888fb8",
- "model_root": "./KKBox/FGCNN_kkbox_x1/",
- "monitor": "{'AUC': 1, 'logloss': -1}",
- "monitor_mode": "max",
- "net_dropout": "0",
- "net_regularizer": "0",
- "optimizer": "adam",
- "patience": "2",
- "pickle_feature_encoder": "True",
- "pooling_sizes": "[2, 2, 2, 2]",
- "recombined_channels": "[3, 3, 3, 3]",
- "save_best_only": "True",
- "seed": "2019",
- "share_embedding": "False",
- "shuffle": "True",
- "task": "binary_classification",
- "test_data": "../data/KKBox/KKBox_x1/test.csv",
- "train_data": "../data/KKBox/KKBox_x1/train.csv",
- "use_hdf5": "True",
- "valid_data": "../data/KKBox/KKBox_x1/valid.csv",
- "verbose": "1",
- "version": "pytorch",
- "workers": "3"
-}
-2022-03-11 09:13:55,483 P27740 INFO Set up feature encoder...
-2022-03-11 09:13:55,483 P27740 INFO Load feature_encoder from pickle: ../data/KKBox/kkbox_x1_227d337d/feature_encoder.pkl
-2022-03-11 09:13:56,945 P27740 INFO Total number of parameters: 84979183.
-2022-03-11 09:13:56,945 P27740 INFO Loading data...
-2022-03-11 09:13:56,946 P27740 INFO Loading data from h5: ../data/KKBox/kkbox_x1_227d337d/train.h5
-2022-03-11 09:13:57,256 P27740 INFO Loading data from h5: ../data/KKBox/kkbox_x1_227d337d/valid.h5
-2022-03-11 09:13:57,448 P27740 INFO Train samples: total/5901932, pos/2971724, neg/2930208, ratio/50.35%
-2022-03-11 09:13:57,465 P27740 INFO Validation samples: total/737743, pos/371466, neg/366277, ratio/50.35%
-2022-03-11 09:13:57,465 P27740 INFO Loading train data done.
-2022-03-11 09:14:00,375 P27740 INFO Start training: 2951 batches/epoch
-2022-03-11 09:14:00,375 P27740 INFO ************ Epoch=1 start ************
-2022-03-11 09:26:49,473 P27740 INFO [Metrics] logloss: 0.549213 - AUC: 0.792878
-2022-03-11 09:26:49,473 P27740 INFO Save best model: monitor(max): 0.243665
-2022-03-11 09:26:49,960 P27740 INFO --- 2951/2951 batches finished ---
-2022-03-11 09:26:49,989 P27740 INFO Train loss: 0.649823
-2022-03-11 09:26:49,989 P27740 INFO ************ Epoch=1 end ************
-2022-03-11 09:39:38,375 P27740 INFO [Metrics] logloss: 0.544913 - AUC: 0.797325
-2022-03-11 09:39:38,376 P27740 INFO Save best model: monitor(max): 0.252411
-2022-03-11 09:39:38,813 P27740 INFO --- 2951/2951 batches finished ---
-2022-03-11 09:39:38,850 P27740 INFO Train loss: 0.639763
-2022-03-11 09:39:38,850 P27740 INFO ************ Epoch=2 end ************
-2022-03-11 09:52:27,209 P27740 INFO [Metrics] logloss: 0.540808 - AUC: 0.801736
-2022-03-11 09:52:27,210 P27740 INFO Save best model: monitor(max): 0.260928
-2022-03-11 09:52:28,093 P27740 INFO --- 2951/2951 batches finished ---
-2022-03-11 09:52:28,129 P27740 INFO Train loss: 0.640772
-2022-03-11 09:52:28,129 P27740 INFO ************ Epoch=3 end ************
-2022-03-11 10:05:15,125 P27740 INFO [Metrics] logloss: 0.537259 - AUC: 0.804458
-2022-03-11 10:05:15,126 P27740 INFO Save best model: monitor(max): 0.267198
-2022-03-11 10:05:15,572 P27740 INFO --- 2951/2951 batches finished ---
-2022-03-11 10:05:15,607 P27740 INFO Train loss: 0.638841
-2022-03-11 10:05:15,608 P27740 INFO ************ Epoch=4 end ************
-2022-03-11 10:18:02,710 P27740 INFO [Metrics] logloss: 0.534469 - AUC: 0.806573
-2022-03-11 10:18:02,711 P27740 INFO Save best model: monitor(max): 0.272104
-2022-03-11 10:18:03,184 P27740 INFO --- 2951/2951 batches finished ---
-2022-03-11 10:18:03,228 P27740 INFO Train loss: 0.638757
-2022-03-11 10:18:03,229 P27740 INFO ************ Epoch=5 end ************
-2022-03-11 10:30:50,443 P27740 INFO [Metrics] logloss: 0.532756 - AUC: 0.808265
-2022-03-11 10:30:50,444 P27740 INFO Save best model: monitor(max): 0.275510
-2022-03-11 10:30:50,875 P27740 INFO --- 2951/2951 batches finished ---
-2022-03-11 10:30:50,911 P27740 INFO Train loss: 0.639444
-2022-03-11 10:30:50,911 P27740 INFO ************ Epoch=6 end ************
-2022-03-11 10:43:38,734 P27740 INFO [Metrics] logloss: 0.529260 - AUC: 0.811055
-2022-03-11 10:43:38,735 P27740 INFO Save best model: monitor(max): 0.281795
-2022-03-11 10:43:39,200 P27740 INFO --- 2951/2951 batches finished ---
-2022-03-11 10:43:39,235 P27740 INFO Train loss: 0.637451
-2022-03-11 10:43:39,235 P27740 INFO ************ Epoch=7 end ************
-2022-03-11 10:56:27,223 P27740 INFO [Metrics] logloss: 0.528284 - AUC: 0.812920
-2022-03-11 10:56:27,224 P27740 INFO Save best model: monitor(max): 0.284635
-2022-03-11 10:56:27,652 P27740 INFO --- 2951/2951 batches finished ---
-2022-03-11 10:56:27,693 P27740 INFO Train loss: 0.634762
-2022-03-11 10:56:27,693 P27740 INFO ************ Epoch=8 end ************
-2022-03-11 11:09:16,119 P27740 INFO [Metrics] logloss: 0.522795 - AUC: 0.816325
-2022-03-11 11:09:16,120 P27740 INFO Save best model: monitor(max): 0.293530
-2022-03-11 11:09:16,573 P27740 INFO --- 2951/2951 batches finished ---
-2022-03-11 11:09:16,609 P27740 INFO Train loss: 0.631828
-2022-03-11 11:09:16,609 P27740 INFO ************ Epoch=9 end ************
-2022-03-11 11:22:03,978 P27740 INFO [Metrics] logloss: 0.521295 - AUC: 0.817766
-2022-03-11 11:22:03,978 P27740 INFO Save best model: monitor(max): 0.296471
-2022-03-11 11:22:04,379 P27740 INFO --- 2951/2951 batches finished ---
-2022-03-11 11:22:04,413 P27740 INFO Train loss: 0.629391
-2022-03-11 11:22:04,413 P27740 INFO ************ Epoch=10 end ************
-2022-03-11 11:34:51,227 P27740 INFO [Metrics] logloss: 0.519314 - AUC: 0.819381
-2022-03-11 11:34:51,228 P27740 INFO Save best model: monitor(max): 0.300068
-2022-03-11 11:34:51,701 P27740 INFO --- 2951/2951 batches finished ---
-2022-03-11 11:34:51,736 P27740 INFO Train loss: 0.625961
-2022-03-11 11:34:51,736 P27740 INFO ************ Epoch=11 end ************
-2022-03-11 11:47:39,701 P27740 INFO [Metrics] logloss: 0.517578 - AUC: 0.820738
-2022-03-11 11:47:39,702 P27740 INFO Save best model: monitor(max): 0.303160
-2022-03-11 11:47:40,236 P27740 INFO --- 2951/2951 batches finished ---
-2022-03-11 11:47:40,271 P27740 INFO Train loss: 0.624200
-2022-03-11 11:47:40,271 P27740 INFO ************ Epoch=12 end ************
-2022-03-11 12:00:26,970 P27740 INFO [Metrics] logloss: 0.516383 - AUC: 0.821670
-2022-03-11 12:00:26,971 P27740 INFO Save best model: monitor(max): 0.305287
-2022-03-11 12:00:27,395 P27740 INFO --- 2951/2951 batches finished ---
-2022-03-11 12:00:27,430 P27740 INFO Train loss: 0.622181
-2022-03-11 12:00:27,430 P27740 INFO ************ Epoch=13 end ************
-2022-03-11 12:13:15,641 P27740 INFO [Metrics] logloss: 0.515959 - AUC: 0.822612
-2022-03-11 12:13:15,642 P27740 INFO Save best model: monitor(max): 0.306652
-2022-03-11 12:13:16,084 P27740 INFO --- 2951/2951 batches finished ---
-2022-03-11 12:13:16,119 P27740 INFO Train loss: 0.620168
-2022-03-11 12:13:16,119 P27740 INFO ************ Epoch=14 end ************
-2022-03-11 12:26:03,216 P27740 INFO [Metrics] logloss: 0.514723 - AUC: 0.822813
-2022-03-11 12:26:03,216 P27740 INFO Save best model: monitor(max): 0.308090
-2022-03-11 12:26:03,648 P27740 INFO --- 2951/2951 batches finished ---
-2022-03-11 12:26:03,691 P27740 INFO Train loss: 0.618613
-2022-03-11 12:26:03,691 P27740 INFO ************ Epoch=15 end ************
-2022-03-11 12:38:51,080 P27740 INFO [Metrics] logloss: 0.513329 - AUC: 0.823951
-2022-03-11 12:38:51,081 P27740 INFO Save best model: monitor(max): 0.310623
-2022-03-11 12:38:51,554 P27740 INFO --- 2951/2951 batches finished ---
-2022-03-11 12:38:51,588 P27740 INFO Train loss: 0.617123
-2022-03-11 12:38:51,588 P27740 INFO ************ Epoch=16 end ************
-2022-03-11 12:51:38,771 P27740 INFO [Metrics] logloss: 0.511042 - AUC: 0.825877
-2022-03-11 12:51:38,772 P27740 INFO Save best model: monitor(max): 0.314835
-2022-03-11 12:51:39,213 P27740 INFO --- 2951/2951 batches finished ---
-2022-03-11 12:51:39,247 P27740 INFO Train loss: 0.616331
-2022-03-11 12:51:39,247 P27740 INFO ************ Epoch=17 end ************
-2022-03-11 13:04:26,896 P27740 INFO [Metrics] logloss: 0.511394 - AUC: 0.825754
-2022-03-11 13:04:26,896 P27740 INFO Monitor(max) STOP: 0.314360 !
-2022-03-11 13:04:26,896 P27740 INFO Reduce learning rate on plateau: 0.000050
-2022-03-11 13:04:26,896 P27740 INFO --- 2951/2951 batches finished ---
-2022-03-11 13:04:26,929 P27740 INFO Train loss: 0.615077
-2022-03-11 13:04:26,929 P27740 INFO ************ Epoch=18 end ************
-2022-03-11 13:17:14,561 P27740 INFO [Metrics] logloss: 0.481099 - AUC: 0.849566
-2022-03-11 13:17:14,561 P27740 INFO Save best model: monitor(max): 0.368468
-2022-03-11 13:17:15,167 P27740 INFO --- 2951/2951 batches finished ---
-2022-03-11 13:17:15,202 P27740 INFO Train loss: 0.525168
-2022-03-11 13:17:15,202 P27740 INFO ************ Epoch=19 end ************
-2022-03-11 13:30:03,076 P27740 INFO [Metrics] logloss: 0.479571 - AUC: 0.852404
-2022-03-11 13:30:03,077 P27740 INFO Save best model: monitor(max): 0.372833
-2022-03-11 13:30:03,525 P27740 INFO --- 2951/2951 batches finished ---
-2022-03-11 13:30:03,569 P27740 INFO Train loss: 0.468585
-2022-03-11 13:30:03,569 P27740 INFO ************ Epoch=20 end ************
-2022-03-11 13:42:51,185 P27740 INFO [Metrics] logloss: 0.488731 - AUC: 0.850102
-2022-03-11 13:42:51,185 P27740 INFO Monitor(max) STOP: 0.361370 !
-2022-03-11 13:42:51,185 P27740 INFO Reduce learning rate on plateau: 0.000005
-2022-03-11 13:42:51,185 P27740 INFO --- 2951/2951 batches finished ---
-2022-03-11 13:42:51,221 P27740 INFO Train loss: 0.440094
-2022-03-11 13:42:51,221 P27740 INFO ************ Epoch=21 end ************
-2022-03-11 13:55:38,498 P27740 INFO [Metrics] logloss: 0.540178 - AUC: 0.844262
-2022-03-11 13:55:38,498 P27740 INFO Monitor(max) STOP: 0.304084 !
-2022-03-11 13:55:38,499 P27740 INFO Reduce learning rate on plateau: 0.000001
-2022-03-11 13:55:38,499 P27740 INFO Early stopping at epoch=22
-2022-03-11 13:55:38,499 P27740 INFO --- 2951/2951 batches finished ---
-2022-03-11 13:55:38,533 P27740 INFO Train loss: 0.376207
-2022-03-11 13:55:38,533 P27740 INFO Training finished.
-2022-03-11 13:55:38,533 P27740 INFO Load best model: /cache/FuxiCTR/benchmarks/KKBox/FGCNN_kkbox_x1/kkbox_x1_227d337d/FGCNN_kkbox_x1_014_48888fb8_model.ckpt
-2022-03-11 13:55:39,078 P27740 INFO ****** Validation evaluation ******
-2022-03-11 13:55:53,029 P27740 INFO [Metrics] logloss: 0.479571 - AUC: 0.852404
-2022-03-11 13:55:53,087 P27740 INFO ******** Test evaluation ********
-2022-03-11 13:55:53,087 P27740 INFO Loading data...
-2022-03-11 13:55:53,087 P27740 INFO Loading data from h5: ../data/KKBox/kkbox_x1_227d337d/test.h5
-2022-03-11 13:55:53,159 P27740 INFO Test samples: total/737743, pos/371466, neg/366277, ratio/50.35%
-2022-03-11 13:55:53,159 P27740 INFO Loading test data done.
-2022-03-11 13:56:07,158 P27740 INFO [Metrics] logloss: 0.480056 - AUC: 0.852189
-
-```
+## FGCNN_kkbox_x1
+
+A hands-on guide to run the FGCNN model on the KKBox_x1 dataset.
+
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
+
+### Index
+[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
+
+### Environments
++ Hardware
+
+ ```python
+ CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
+ GPU: Tesla V100 32G
+ RAM: 755G
+
+ ```
+
++ Software
+
+ ```python
+ CUDA: 10.2
+ python: 3.6.4
+ pytorch: 1.0.0
+ pandas: 0.22.0
+ numpy: 1.19.2
+ scipy: 1.5.4
+ sklearn: 0.22.1
+ pyyaml: 5.4.1
+ h5py: 2.8.0
+ tqdm: 4.60.0
+ fuxictr: 1.0.2
+ ```
+
+### Dataset
+Dataset ID: [KKBox_x1](https://github.com/reczoo/Datasets/tree/main/KKBox/KKBox_x1). Please refer to the dataset details to get data ready.
+
+### Code
+
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FGCNN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FGCNN.py).
+
+Running steps:
+
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+
+ ```python
+ sys.path.append('YOUR_PATH_TO_FuxiCTR/')
+ ```
+
+2. Create a data directory and put the downloaded csv files in `../data/KKBox/KKBox_x1`.
+
+3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [FGCNN_kkbox_x1_tuner_config_02](./FGCNN_kkbox_x1_tuner_config_02). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
+
+4. Run the following script to start.
+
+ ```bash
+ cd FGCNN_kkbox_x1
+ nohup python run_expid.py --config ./FGCNN_kkbox_x1_tuner_config_02 --expid FGCNN_kkbox_x1_014_48888fb8 --gpu 0 > run.log &
+ tail -f run.log
+ ```
+
+### Results
+
+| logloss | AUC |
+|:--------------------:|:--------------------:|
+| 0.480056 | 0.852189 |
+
+
+### Logs
+```python
+2022-03-11 09:13:55,483 P27740 INFO {
+ "batch_size": "2000",
+ "channels": "[14, 16, 18, 20]",
+ "conv_activation": "Tanh",
+ "conv_batch_norm": "True",
+ "data_format": "csv",
+ "data_root": "../data/KKBox/",
+ "dataset_id": "kkbox_x1_227d337d",
+ "debug": "False",
+ "dnn_activations": "ReLU",
+ "dnn_batch_norm": "False",
+ "dnn_hidden_units": "[1000, 1000, 1000]",
+ "embedding_dim": "128",
+ "embedding_dropout": "0",
+ "embedding_regularizer": "0.001",
+ "epochs": "100",
+ "every_x_epochs": "1",
+ "feature_cols": "[{'active': True, 'dtype': 'str', 'name': ['msno', 'song_id', 'source_system_tab', 'source_screen_name', 'source_type', 'city', 'gender', 'registered_via', 'language'], 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'encoder': 'MaskedSumPooling', 'max_len': 3, 'name': 'genre_ids', 'type': 'sequence'}, {'active': True, 'dtype': 'str', 'encoder': 'MaskedSumPooling', 'max_len': 3, 'name': 'artist_name', 'type': 'sequence'}, {'active': True, 'dtype': 'str', 'name': 'isrc', 'preprocess': 'extract_country_code', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': 'bd', 'preprocess': 'bucketize_age', 'type': 'categorical'}]",
+ "gpu": "4",
+ "kernel_heights": "[7, 7, 7, 7]",
+ "label_col": "{'dtype': 'float', 'name': 'label'}",
+ "learning_rate": "0.0005",
+ "loss": "binary_crossentropy",
+ "metrics": "['logloss', 'AUC']",
+ "min_categr_count": "10",
+ "model": "FGCNN",
+ "model_id": "FGCNN_kkbox_x1_014_48888fb8",
+ "model_root": "./KKBox/FGCNN_kkbox_x1/",
+ "monitor": "{'AUC': 1, 'logloss': -1}",
+ "monitor_mode": "max",
+ "net_dropout": "0",
+ "net_regularizer": "0",
+ "optimizer": "adam",
+ "patience": "2",
+ "pickle_feature_encoder": "True",
+ "pooling_sizes": "[2, 2, 2, 2]",
+ "recombined_channels": "[3, 3, 3, 3]",
+ "save_best_only": "True",
+ "seed": "2019",
+ "share_embedding": "False",
+ "shuffle": "True",
+ "task": "binary_classification",
+ "test_data": "../data/KKBox/KKBox_x1/test.csv",
+ "train_data": "../data/KKBox/KKBox_x1/train.csv",
+ "use_hdf5": "True",
+ "valid_data": "../data/KKBox/KKBox_x1/valid.csv",
+ "verbose": "1",
+ "version": "pytorch",
+ "workers": "3"
+}
+2022-03-11 09:13:55,483 P27740 INFO Set up feature encoder...
+2022-03-11 09:13:55,483 P27740 INFO Load feature_encoder from pickle: ../data/KKBox/kkbox_x1_227d337d/feature_encoder.pkl
+2022-03-11 09:13:56,945 P27740 INFO Total number of parameters: 84979183.
+2022-03-11 09:13:56,945 P27740 INFO Loading data...
+2022-03-11 09:13:56,946 P27740 INFO Loading data from h5: ../data/KKBox/kkbox_x1_227d337d/train.h5
+2022-03-11 09:13:57,256 P27740 INFO Loading data from h5: ../data/KKBox/kkbox_x1_227d337d/valid.h5
+2022-03-11 09:13:57,448 P27740 INFO Train samples: total/5901932, pos/2971724, neg/2930208, ratio/50.35%
+2022-03-11 09:13:57,465 P27740 INFO Validation samples: total/737743, pos/371466, neg/366277, ratio/50.35%
+2022-03-11 09:13:57,465 P27740 INFO Loading train data done.
+2022-03-11 09:14:00,375 P27740 INFO Start training: 2951 batches/epoch
+2022-03-11 09:14:00,375 P27740 INFO ************ Epoch=1 start ************
+2022-03-11 09:26:49,473 P27740 INFO [Metrics] logloss: 0.549213 - AUC: 0.792878
+2022-03-11 09:26:49,473 P27740 INFO Save best model: monitor(max): 0.243665
+2022-03-11 09:26:49,960 P27740 INFO --- 2951/2951 batches finished ---
+2022-03-11 09:26:49,989 P27740 INFO Train loss: 0.649823
+2022-03-11 09:26:49,989 P27740 INFO ************ Epoch=1 end ************
+2022-03-11 09:39:38,375 P27740 INFO [Metrics] logloss: 0.544913 - AUC: 0.797325
+2022-03-11 09:39:38,376 P27740 INFO Save best model: monitor(max): 0.252411
+2022-03-11 09:39:38,813 P27740 INFO --- 2951/2951 batches finished ---
+2022-03-11 09:39:38,850 P27740 INFO Train loss: 0.639763
+2022-03-11 09:39:38,850 P27740 INFO ************ Epoch=2 end ************
+2022-03-11 09:52:27,209 P27740 INFO [Metrics] logloss: 0.540808 - AUC: 0.801736
+2022-03-11 09:52:27,210 P27740 INFO Save best model: monitor(max): 0.260928
+2022-03-11 09:52:28,093 P27740 INFO --- 2951/2951 batches finished ---
+2022-03-11 09:52:28,129 P27740 INFO Train loss: 0.640772
+2022-03-11 09:52:28,129 P27740 INFO ************ Epoch=3 end ************
+2022-03-11 10:05:15,125 P27740 INFO [Metrics] logloss: 0.537259 - AUC: 0.804458
+2022-03-11 10:05:15,126 P27740 INFO Save best model: monitor(max): 0.267198
+2022-03-11 10:05:15,572 P27740 INFO --- 2951/2951 batches finished ---
+2022-03-11 10:05:15,607 P27740 INFO Train loss: 0.638841
+2022-03-11 10:05:15,608 P27740 INFO ************ Epoch=4 end ************
+2022-03-11 10:18:02,710 P27740 INFO [Metrics] logloss: 0.534469 - AUC: 0.806573
+2022-03-11 10:18:02,711 P27740 INFO Save best model: monitor(max): 0.272104
+2022-03-11 10:18:03,184 P27740 INFO --- 2951/2951 batches finished ---
+2022-03-11 10:18:03,228 P27740 INFO Train loss: 0.638757
+2022-03-11 10:18:03,229 P27740 INFO ************ Epoch=5 end ************
+2022-03-11 10:30:50,443 P27740 INFO [Metrics] logloss: 0.532756 - AUC: 0.808265
+2022-03-11 10:30:50,444 P27740 INFO Save best model: monitor(max): 0.275510
+2022-03-11 10:30:50,875 P27740 INFO --- 2951/2951 batches finished ---
+2022-03-11 10:30:50,911 P27740 INFO Train loss: 0.639444
+2022-03-11 10:30:50,911 P27740 INFO ************ Epoch=6 end ************
+2022-03-11 10:43:38,734 P27740 INFO [Metrics] logloss: 0.529260 - AUC: 0.811055
+2022-03-11 10:43:38,735 P27740 INFO Save best model: monitor(max): 0.281795
+2022-03-11 10:43:39,200 P27740 INFO --- 2951/2951 batches finished ---
+2022-03-11 10:43:39,235 P27740 INFO Train loss: 0.637451
+2022-03-11 10:43:39,235 P27740 INFO ************ Epoch=7 end ************
+2022-03-11 10:56:27,223 P27740 INFO [Metrics] logloss: 0.528284 - AUC: 0.812920
+2022-03-11 10:56:27,224 P27740 INFO Save best model: monitor(max): 0.284635
+2022-03-11 10:56:27,652 P27740 INFO --- 2951/2951 batches finished ---
+2022-03-11 10:56:27,693 P27740 INFO Train loss: 0.634762
+2022-03-11 10:56:27,693 P27740 INFO ************ Epoch=8 end ************
+2022-03-11 11:09:16,119 P27740 INFO [Metrics] logloss: 0.522795 - AUC: 0.816325
+2022-03-11 11:09:16,120 P27740 INFO Save best model: monitor(max): 0.293530
+2022-03-11 11:09:16,573 P27740 INFO --- 2951/2951 batches finished ---
+2022-03-11 11:09:16,609 P27740 INFO Train loss: 0.631828
+2022-03-11 11:09:16,609 P27740 INFO ************ Epoch=9 end ************
+2022-03-11 11:22:03,978 P27740 INFO [Metrics] logloss: 0.521295 - AUC: 0.817766
+2022-03-11 11:22:03,978 P27740 INFO Save best model: monitor(max): 0.296471
+2022-03-11 11:22:04,379 P27740 INFO --- 2951/2951 batches finished ---
+2022-03-11 11:22:04,413 P27740 INFO Train loss: 0.629391
+2022-03-11 11:22:04,413 P27740 INFO ************ Epoch=10 end ************
+2022-03-11 11:34:51,227 P27740 INFO [Metrics] logloss: 0.519314 - AUC: 0.819381
+2022-03-11 11:34:51,228 P27740 INFO Save best model: monitor(max): 0.300068
+2022-03-11 11:34:51,701 P27740 INFO --- 2951/2951 batches finished ---
+2022-03-11 11:34:51,736 P27740 INFO Train loss: 0.625961
+2022-03-11 11:34:51,736 P27740 INFO ************ Epoch=11 end ************
+2022-03-11 11:47:39,701 P27740 INFO [Metrics] logloss: 0.517578 - AUC: 0.820738
+2022-03-11 11:47:39,702 P27740 INFO Save best model: monitor(max): 0.303160
+2022-03-11 11:47:40,236 P27740 INFO --- 2951/2951 batches finished ---
+2022-03-11 11:47:40,271 P27740 INFO Train loss: 0.624200
+2022-03-11 11:47:40,271 P27740 INFO ************ Epoch=12 end ************
+2022-03-11 12:00:26,970 P27740 INFO [Metrics] logloss: 0.516383 - AUC: 0.821670
+2022-03-11 12:00:26,971 P27740 INFO Save best model: monitor(max): 0.305287
+2022-03-11 12:00:27,395 P27740 INFO --- 2951/2951 batches finished ---
+2022-03-11 12:00:27,430 P27740 INFO Train loss: 0.622181
+2022-03-11 12:00:27,430 P27740 INFO ************ Epoch=13 end ************
+2022-03-11 12:13:15,641 P27740 INFO [Metrics] logloss: 0.515959 - AUC: 0.822612
+2022-03-11 12:13:15,642 P27740 INFO Save best model: monitor(max): 0.306652
+2022-03-11 12:13:16,084 P27740 INFO --- 2951/2951 batches finished ---
+2022-03-11 12:13:16,119 P27740 INFO Train loss: 0.620168
+2022-03-11 12:13:16,119 P27740 INFO ************ Epoch=14 end ************
+2022-03-11 12:26:03,216 P27740 INFO [Metrics] logloss: 0.514723 - AUC: 0.822813
+2022-03-11 12:26:03,216 P27740 INFO Save best model: monitor(max): 0.308090
+2022-03-11 12:26:03,648 P27740 INFO --- 2951/2951 batches finished ---
+2022-03-11 12:26:03,691 P27740 INFO Train loss: 0.618613
+2022-03-11 12:26:03,691 P27740 INFO ************ Epoch=15 end ************
+2022-03-11 12:38:51,080 P27740 INFO [Metrics] logloss: 0.513329 - AUC: 0.823951
+2022-03-11 12:38:51,081 P27740 INFO Save best model: monitor(max): 0.310623
+2022-03-11 12:38:51,554 P27740 INFO --- 2951/2951 batches finished ---
+2022-03-11 12:38:51,588 P27740 INFO Train loss: 0.617123
+2022-03-11 12:38:51,588 P27740 INFO ************ Epoch=16 end ************
+2022-03-11 12:51:38,771 P27740 INFO [Metrics] logloss: 0.511042 - AUC: 0.825877
+2022-03-11 12:51:38,772 P27740 INFO Save best model: monitor(max): 0.314835
+2022-03-11 12:51:39,213 P27740 INFO --- 2951/2951 batches finished ---
+2022-03-11 12:51:39,247 P27740 INFO Train loss: 0.616331
+2022-03-11 12:51:39,247 P27740 INFO ************ Epoch=17 end ************
+2022-03-11 13:04:26,896 P27740 INFO [Metrics] logloss: 0.511394 - AUC: 0.825754
+2022-03-11 13:04:26,896 P27740 INFO Monitor(max) STOP: 0.314360 !
+2022-03-11 13:04:26,896 P27740 INFO Reduce learning rate on plateau: 0.000050
+2022-03-11 13:04:26,896 P27740 INFO --- 2951/2951 batches finished ---
+2022-03-11 13:04:26,929 P27740 INFO Train loss: 0.615077
+2022-03-11 13:04:26,929 P27740 INFO ************ Epoch=18 end ************
+2022-03-11 13:17:14,561 P27740 INFO [Metrics] logloss: 0.481099 - AUC: 0.849566
+2022-03-11 13:17:14,561 P27740 INFO Save best model: monitor(max): 0.368468
+2022-03-11 13:17:15,167 P27740 INFO --- 2951/2951 batches finished ---
+2022-03-11 13:17:15,202 P27740 INFO Train loss: 0.525168
+2022-03-11 13:17:15,202 P27740 INFO ************ Epoch=19 end ************
+2022-03-11 13:30:03,076 P27740 INFO [Metrics] logloss: 0.479571 - AUC: 0.852404
+2022-03-11 13:30:03,077 P27740 INFO Save best model: monitor(max): 0.372833
+2022-03-11 13:30:03,525 P27740 INFO --- 2951/2951 batches finished ---
+2022-03-11 13:30:03,569 P27740 INFO Train loss: 0.468585
+2022-03-11 13:30:03,569 P27740 INFO ************ Epoch=20 end ************
+2022-03-11 13:42:51,185 P27740 INFO [Metrics] logloss: 0.488731 - AUC: 0.850102
+2022-03-11 13:42:51,185 P27740 INFO Monitor(max) STOP: 0.361370 !
+2022-03-11 13:42:51,185 P27740 INFO Reduce learning rate on plateau: 0.000005
+2022-03-11 13:42:51,185 P27740 INFO --- 2951/2951 batches finished ---
+2022-03-11 13:42:51,221 P27740 INFO Train loss: 0.440094
+2022-03-11 13:42:51,221 P27740 INFO ************ Epoch=21 end ************
+2022-03-11 13:55:38,498 P27740 INFO [Metrics] logloss: 0.540178 - AUC: 0.844262
+2022-03-11 13:55:38,498 P27740 INFO Monitor(max) STOP: 0.304084 !
+2022-03-11 13:55:38,499 P27740 INFO Reduce learning rate on plateau: 0.000001
+2022-03-11 13:55:38,499 P27740 INFO Early stopping at epoch=22
+2022-03-11 13:55:38,499 P27740 INFO --- 2951/2951 batches finished ---
+2022-03-11 13:55:38,533 P27740 INFO Train loss: 0.376207
+2022-03-11 13:55:38,533 P27740 INFO Training finished.
+2022-03-11 13:55:38,533 P27740 INFO Load best model: /cache/FuxiCTR/benchmarks/KKBox/FGCNN_kkbox_x1/kkbox_x1_227d337d/FGCNN_kkbox_x1_014_48888fb8_model.ckpt
+2022-03-11 13:55:39,078 P27740 INFO ****** Validation evaluation ******
+2022-03-11 13:55:53,029 P27740 INFO [Metrics] logloss: 0.479571 - AUC: 0.852404
+2022-03-11 13:55:53,087 P27740 INFO ******** Test evaluation ********
+2022-03-11 13:55:53,087 P27740 INFO Loading data...
+2022-03-11 13:55:53,087 P27740 INFO Loading data from h5: ../data/KKBox/kkbox_x1_227d337d/test.h5
+2022-03-11 13:55:53,159 P27740 INFO Test samples: total/737743, pos/371466, neg/366277, ratio/50.35%
+2022-03-11 13:55:53,159 P27740 INFO Loading test data done.
+2022-03-11 13:56:07,158 P27740 INFO [Metrics] logloss: 0.480056 - AUC: 0.852189
+
+```
diff --git a/ranking/ctr/FLEN/FLEN_avazu_x4_001/README.md b/ranking/ctr/FLEN/FLEN_avazu_x4_001/README.md
index 839f1425..4621c9d1 100644
--- a/ranking/ctr/FLEN/FLEN_avazu_x4_001/README.md
+++ b/ranking/ctr/FLEN/FLEN_avazu_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FLEN model on the Avazu_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FLEN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FLEN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FLEN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FLEN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FM/FM_amazonelectronics_x1/README.md b/ranking/ctr/FM/FM_amazonelectronics_x1/README.md
index 49101633..f743c1fd 100644
--- a/ranking/ctr/FM/FM_amazonelectronics_x1/README.md
+++ b/ranking/ctr/FM/FM_amazonelectronics_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FM model on the AmazonElectronics_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [AmazonElectronics_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/Amazon#AmazonElectronics_x1) to get data ready.
+Please refer to [AmazonElectronics_x1](https://github.com/reczoo/Datasets/tree/main/Amazon/AmazonElectronics_x1) to get the dataset details.
### Code
-We use the [FM](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/FM) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [FM](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/FM) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/FM/FM_avazu_x1/README.md b/ranking/ctr/FM/FM_avazu_x1/README.md
index c1a7dda7..526ca873 100644
--- a/ranking/ctr/FM/FM_avazu_x1/README.md
+++ b/ranking/ctr/FM/FM_avazu_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FM model on the Avazu_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x1](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FM/FM_avazu_x4_001/README.md b/ranking/ctr/FM/FM_avazu_x4_001/README.md
index 74525cbb..9533901a 100644
--- a/ranking/ctr/FM/FM_avazu_x4_001/README.md
+++ b/ranking/ctr/FM/FM_avazu_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FM model on the Avazu_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FM/FM_avazu_x4_002/README.md b/ranking/ctr/FM/FM_avazu_x4_002/README.md
index ceb43b79..63f64d85 100644
--- a/ranking/ctr/FM/FM_avazu_x4_002/README.md
+++ b/ranking/ctr/FM/FM_avazu_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FM model on the Avazu_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FM/FM_criteo_x1/README.md b/ranking/ctr/FM/FM_criteo_x1/README.md
index 72a00447..27a5e454 100644
--- a/ranking/ctr/FM/FM_criteo_x1/README.md
+++ b/ranking/ctr/FM/FM_criteo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FM model on the Criteo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x1](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FM/FM_criteo_x4_001/README.md b/ranking/ctr/FM/FM_criteo_x4_001/README.md
index 59648ecf..4aa6e54e 100644
--- a/ranking/ctr/FM/FM_criteo_x4_001/README.md
+++ b/ranking/ctr/FM/FM_criteo_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FM model on the Criteo_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FM/FM_criteo_x4_002/README.md b/ranking/ctr/FM/FM_criteo_x4_002/README.md
index 1d7b55cc..dab8c3d8 100644
--- a/ranking/ctr/FM/FM_criteo_x4_002/README.md
+++ b/ranking/ctr/FM/FM_criteo_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FM model on the Criteo_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FM/FM_frappe_x1/README.md b/ranking/ctr/FM/FM_frappe_x1/README.md
index 45869d97..15e51a71 100644
--- a/ranking/ctr/FM/FM_frappe_x1/README.md
+++ b/ranking/ctr/FM/FM_frappe_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FM model on the Frappe_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Frappe_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Frappe/README.md#Frappe_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Frappe_x1](https://github.com/reczoo/Datasets/tree/main/Frappe/Frappe_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FM/FM_kkbox_x1/README.md b/ranking/ctr/FM/FM_kkbox_x1/README.md
index 194d4351..d65282a5 100644
--- a/ranking/ctr/FM/FM_kkbox_x1/README.md
+++ b/ranking/ctr/FM/FM_kkbox_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FM model on the KKBox_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -38,11 +38,11 @@ Dataset ID: [KKBox_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pre
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FM/FM_kuaivideo_x1/README.md b/ranking/ctr/FM/FM_kuaivideo_x1/README.md
index a18c3d3d..7e30ece0 100644
--- a/ranking/ctr/FM/FM_kuaivideo_x1/README.md
+++ b/ranking/ctr/FM/FM_kuaivideo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FM model on the KuaiVideo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [KuaiVideo_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/KuaiShou#KuaiVideo_x1) to get data ready.
+Please refer to [KuaiVideo_x1](https://github.com/reczoo/Datasets/tree/main/KuaiShou/KuaiVideo_x1) to get the dataset details.
### Code
-We use the [FM](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/FM) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [FM](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/FM) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/FM/FM_microvideo1.7m_x1/README.md b/ranking/ctr/FM/FM_microvideo1.7m_x1/README.md
index 4060dcfd..aa5ed9e3 100644
--- a/ranking/ctr/FM/FM_microvideo1.7m_x1/README.md
+++ b/ranking/ctr/FM/FM_microvideo1.7m_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FM model on the MicroVideo1.7M_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [MicroVideo1.7M_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/MicroVideo1.7M#MicroVideo17M_x1) to get data ready.
+Please refer to [MicroVideo1.7M_x1](https://github.com/reczoo/Datasets/tree/main/MicroVideo/MicroVideo1.7M_x1) to get the dataset details.
### Code
-We use the [FM](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/FM) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [FM](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/FM) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/FM/FM_movielenslatest_x1/README.md b/ranking/ctr/FM/FM_movielenslatest_x1/README.md
index 5074c972..1720e389 100644
--- a/ranking/ctr/FM/FM_movielenslatest_x1/README.md
+++ b/ranking/ctr/FM/FM_movielenslatest_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FM model on the Movielenslatest_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Movielenslatest_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/MovieLens/README.md#Movielenslatest_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [MovielensLatest_x1](https://github.com/reczoo/Datasets/tree/main/MovieLens/MovielensLatest_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FM/FM_taobaoad_x1/README.md b/ranking/ctr/FM/FM_taobaoad_x1/README.md
index 4da38ae5..5dbbd8ed 100644
--- a/ranking/ctr/FM/FM_taobaoad_x1/README.md
+++ b/ranking/ctr/FM/FM_taobaoad_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FM model on the TaobaoAd_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [TaobaoAd_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/Taobao#TaobaoAd_x1) to get data ready.
+Please refer to [TaobaoAd_x1](https://github.com/reczoo/Datasets/tree/main/Taobao/TaobaoAd_x1) to get the dataset details.
### Code
-We use the [FM](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/FM) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [FM](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/FM) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/FiBiNET/FiBiNET_avazu_x1/README.md b/ranking/ctr/FiBiNET/FiBiNET_avazu_x1/README.md
index c49d7231..45f9d876 100644
--- a/ranking/ctr/FiBiNET/FiBiNET_avazu_x1/README.md
+++ b/ranking/ctr/FiBiNET/FiBiNET_avazu_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FiBiNET model on the Avazu_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x1](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FiBiNET](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FiBiNET.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FiBiNET](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FiBiNET.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FiBiNET/FiBiNET_avazu_x4_001/README.md b/ranking/ctr/FiBiNET/FiBiNET_avazu_x4_001/README.md
index b12c9e4d..395dcccb 100644
--- a/ranking/ctr/FiBiNET/FiBiNET_avazu_x4_001/README.md
+++ b/ranking/ctr/FiBiNET/FiBiNET_avazu_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FiBiNET model on the Avazu_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FiBiNET](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FiBiNET.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FiBiNET](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FiBiNET.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FiBiNET/FiBiNET_avazu_x4_002/README.md b/ranking/ctr/FiBiNET/FiBiNET_avazu_x4_002/README.md
index 684cb709..ccd00983 100644
--- a/ranking/ctr/FiBiNET/FiBiNET_avazu_x4_002/README.md
+++ b/ranking/ctr/FiBiNET/FiBiNET_avazu_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FiBiNET model on the Avazu_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FiBiNET](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FiBiNET.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FiBiNET](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FiBiNET.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FiBiNET/FiBiNET_criteo_x1/README.md b/ranking/ctr/FiBiNET/FiBiNET_criteo_x1/README.md
index fa17245c..8914e294 100644
--- a/ranking/ctr/FiBiNET/FiBiNET_criteo_x1/README.md
+++ b/ranking/ctr/FiBiNET/FiBiNET_criteo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FiBiNET model on the Criteo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x1](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FiBiNET](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FiBiNET.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FiBiNET](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FiBiNET.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FiBiNET/FiBiNET_criteo_x4_001/README.md b/ranking/ctr/FiBiNET/FiBiNET_criteo_x4_001/README.md
index de2653b8..e65573e3 100644
--- a/ranking/ctr/FiBiNET/FiBiNET_criteo_x4_001/README.md
+++ b/ranking/ctr/FiBiNET/FiBiNET_criteo_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FiBiNET model on the Criteo_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FiBiNET](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FiBiNET.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FiBiNET](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FiBiNET.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FiBiNET/FiBiNET_criteo_x4_002/README.md b/ranking/ctr/FiBiNET/FiBiNET_criteo_x4_002/README.md
index 05409dbf..947d00af 100644
--- a/ranking/ctr/FiBiNET/FiBiNET_criteo_x4_002/README.md
+++ b/ranking/ctr/FiBiNET/FiBiNET_criteo_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FiBiNET model on the Criteo_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FiBiNET](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FiBiNET.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FiBiNET](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FiBiNET.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FiBiNET/FiBiNET_frappe_x1/README.md b/ranking/ctr/FiBiNET/FiBiNET_frappe_x1/README.md
index 58d3af6d..e2a51f80 100644
--- a/ranking/ctr/FiBiNET/FiBiNET_frappe_x1/README.md
+++ b/ranking/ctr/FiBiNET/FiBiNET_frappe_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FiBiNET model on the Frappe_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Frappe_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Frappe/README.md#Frappe_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Frappe_x1](https://github.com/reczoo/Datasets/tree/main/Frappe/Frappe_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FiBiNET](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FiBiNET.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FiBiNET](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FiBiNET.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FiBiNET/FiBiNET_kkbox_x1/README.md b/ranking/ctr/FiBiNET/FiBiNET_kkbox_x1/README.md
index fc592004..e419dd38 100644
--- a/ranking/ctr/FiBiNET/FiBiNET_kkbox_x1/README.md
+++ b/ranking/ctr/FiBiNET/FiBiNET_kkbox_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FiBiNET model on the KKBox_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -38,11 +38,11 @@ Dataset ID: [KKBox_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pre
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FiBiNET](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FiBiNET.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FiBiNET](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FiBiNET.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FiBiNET/FiBiNET_movielenslatest_x1/README.md b/ranking/ctr/FiBiNET/FiBiNET_movielenslatest_x1/README.md
index 299ebf09..190f61b1 100644
--- a/ranking/ctr/FiBiNET/FiBiNET_movielenslatest_x1/README.md
+++ b/ranking/ctr/FiBiNET/FiBiNET_movielenslatest_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FiBiNET model on the Movielenslatest_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Movielenslatest_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/MovieLens/README.md#Movielenslatest_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [MovielensLatest_x1](https://github.com/reczoo/Datasets/tree/main/MovieLens/MovielensLatest_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FiBiNET](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FiBiNET.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FiBiNET](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FiBiNET.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FiGNN/FiGNN_avazu_x1/README.md b/ranking/ctr/FiGNN/FiGNN_avazu_x1/README.md
index 59502d23..d7fe60aa 100644
--- a/ranking/ctr/FiGNN/FiGNN_avazu_x1/README.md
+++ b/ranking/ctr/FiGNN/FiGNN_avazu_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FiGNN model on the Avazu_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x1](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FiGNN](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FiGNN.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FiGNN](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FiGNN.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FiGNN/FiGNN_avazu_x4_001/README.md b/ranking/ctr/FiGNN/FiGNN_avazu_x4_001/README.md
index 8fba0791..5713f95a 100644
--- a/ranking/ctr/FiGNN/FiGNN_avazu_x4_001/README.md
+++ b/ranking/ctr/FiGNN/FiGNN_avazu_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FiGNN model on the Avazu_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FiGNN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FiGNN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FiGNN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FiGNN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FiGNN/FiGNN_avazu_x4_002/README.md b/ranking/ctr/FiGNN/FiGNN_avazu_x4_002/README.md
index 00e5d9f5..2092708b 100644
--- a/ranking/ctr/FiGNN/FiGNN_avazu_x4_002/README.md
+++ b/ranking/ctr/FiGNN/FiGNN_avazu_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FiGNN model on the Avazu_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FiGNN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FiGNN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FiGNN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FiGNN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FiGNN/FiGNN_criteo_x1/README.md b/ranking/ctr/FiGNN/FiGNN_criteo_x1/README.md
index bf2151f3..36fd6e17 100644
--- a/ranking/ctr/FiGNN/FiGNN_criteo_x1/README.md
+++ b/ranking/ctr/FiGNN/FiGNN_criteo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FiGNN model on the Criteo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x1](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FiGNN](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FiGNN.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FiGNN](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FiGNN.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FiGNN/FiGNN_criteo_x4_001/README.md b/ranking/ctr/FiGNN/FiGNN_criteo_x4_001/README.md
index c6c45ac9..4ebb24dc 100644
--- a/ranking/ctr/FiGNN/FiGNN_criteo_x4_001/README.md
+++ b/ranking/ctr/FiGNN/FiGNN_criteo_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FiGNN model on the Criteo_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FiGNN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FiGNN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FiGNN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FiGNN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FiGNN/FiGNN_criteo_x4_002/README.md b/ranking/ctr/FiGNN/FiGNN_criteo_x4_002/README.md
index 89b3fc6f..21c25a8a 100644
--- a/ranking/ctr/FiGNN/FiGNN_criteo_x4_002/README.md
+++ b/ranking/ctr/FiGNN/FiGNN_criteo_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FiGNN model on the Criteo_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FiGNN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FiGNN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FiGNN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FiGNN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FiGNN/FiGNN_frappe_x1/README.md b/ranking/ctr/FiGNN/FiGNN_frappe_x1/README.md
index 1b8a16c3..c31d32f2 100644
--- a/ranking/ctr/FiGNN/FiGNN_frappe_x1/README.md
+++ b/ranking/ctr/FiGNN/FiGNN_frappe_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FiGNN model on the Frappe_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Frappe_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Frappe/README.md#Frappe_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Frappe_x1](https://github.com/reczoo/Datasets/tree/main/Frappe/Frappe_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FiGNN](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FiGNN.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FiGNN](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FiGNN.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FiGNN/FiGNN_kkbox_x1/README.md b/ranking/ctr/FiGNN/FiGNN_kkbox_x1/README.md
index 9e73bad1..de4e8d0b 100644
--- a/ranking/ctr/FiGNN/FiGNN_kkbox_x1/README.md
+++ b/ranking/ctr/FiGNN/FiGNN_kkbox_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FiGNN model on the KKBox_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -38,11 +38,11 @@ Dataset ID: [KKBox_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pre
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FiGNN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FiGNN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FiGNN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FiGNN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FiGNN/FiGNN_movielenslatest_x1/README.md b/ranking/ctr/FiGNN/FiGNN_movielenslatest_x1/README.md
index 6f332b1e..0fc4c3d1 100644
--- a/ranking/ctr/FiGNN/FiGNN_movielenslatest_x1/README.md
+++ b/ranking/ctr/FiGNN/FiGNN_movielenslatest_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FiGNN model on the Movielenslatest_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Movielenslatest_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/MovieLens/README.md#Movielenslatest_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [MovielensLatest_x1](https://github.com/reczoo/Datasets/tree/main/MovieLens/MovielensLatest_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FiGNN](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FiGNN.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FiGNN](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FiGNN.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FinalMLP/FinalMLP_avazu_x1/README.md b/ranking/ctr/FinalMLP/FinalMLP_avazu_x1/README.md
index a79d28f5..13c1f549 100644
--- a/ranking/ctr/FinalMLP/FinalMLP_avazu_x1/README.md
+++ b/ranking/ctr/FinalMLP/FinalMLP_avazu_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FinalMLP model on the Avazu_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -39,11 +39,11 @@ Please refer to the BARS dataset [Avazu_x1](https://github.com/openbenchmark/BAR
### Code
-We use the [FinalMLP](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/FinalMLP) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [FinalMLP](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/FinalMLP) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/FinalMLP/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_001_006_a7c95fe1.log b/ranking/ctr/FinalMLP/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_001_006_a7c95fe1.log
new file mode 100644
index 00000000..9a77bf30
--- /dev/null
+++ b/ranking/ctr/FinalMLP/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_001_006_a7c95fe1.log
@@ -0,0 +1,110 @@
+2024-02-22 12:55:35,485 P1084821 INFO Params: {
+ "batch_size": "10000",
+ "data_format": "csv",
+ "data_root": "../data/Avazu/",
+ "dataset_id": "avazu_x4_001_a31210da",
+ "debug_mode": "False",
+ "early_stop_patience": "1",
+ "embedding_dim": "16",
+ "embedding_regularizer": "1e-09",
+ "epochs": "100",
+ "eval_steps": "None",
+ "feature_cols": "[{'active': False, 'dtype': 'str', 'name': 'id', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': 'hour', 'preprocess': 'convert_hour', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': ['C1', 'banner_pos', 'site_id', 'site_domain', 'site_category', 'app_id', 'app_domain', 'app_category', 'device_id', 'device_ip', 'device_model', 'device_type', 'device_conn_type', 'C14', 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21'], 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': 'weekday', 'preprocess': 'convert_weekday', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': 'weekend', 'preprocess': 'convert_weekend', 'type': 'categorical'}]",
+ "feature_config": "None",
+ "feature_specs": "None",
+ "fs1_context": "[]",
+ "fs2_context": "[]",
+ "fs_hidden_units": "[1000]",
+ "gpu": "5",
+ "group_id": "None",
+ "label_col": "{'dtype': 'float', 'name': 'click'}",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "metrics": "['AUC', 'logloss']",
+ "min_categr_count": "2",
+ "mlp1_batch_norm": "True",
+ "mlp1_dropout": "0",
+ "mlp1_hidden_activations": "relu",
+ "mlp1_hidden_units": "[2000, 2000, 2000]",
+ "mlp2_batch_norm": "False",
+ "mlp2_dropout": "0",
+ "mlp2_hidden_activations": "relu",
+ "mlp2_hidden_units": "[500]",
+ "model": "FinalMLP",
+ "model_id": "FinalMLP_avazu_x4_001_006_a7c95fe1",
+ "model_root": "./checkpoints/",
+ "monitor": "{'AUC': 1, 'logloss': -1}",
+ "monitor_mode": "max",
+ "net_regularizer": "0",
+ "num_heads": "20",
+ "num_workers": "3",
+ "optimizer": "adam",
+ "pickle_feature_encoder": "True",
+ "save_best_only": "True",
+ "seed": "2019",
+ "shuffle": "True",
+ "task": "binary_classification",
+ "test_data": "../data/Avazu/Avazu_x4/test.csv",
+ "train_data": "../data/Avazu/Avazu_x4/train.csv",
+ "use_features": "None",
+ "use_fs": "False",
+ "valid_data": "../data/Avazu/Avazu_x4/valid.csv",
+ "verbose": "1"
+}
+2024-02-22 12:55:35,486 P1084821 INFO Set up feature processor...
+2024-02-22 12:55:35,486 P1084821 WARNING Skip rebuilding ../data/Avazu/avazu_x4_001_a31210da/feature_map.json. Please delete it manually if rebuilding is required.
+2024-02-22 12:55:35,487 P1084821 INFO Load feature_map from json: ../data/Avazu/avazu_x4_001_a31210da/feature_map.json
+2024-02-22 12:55:35,487 P1084821 INFO Set column index...
+2024-02-22 12:55:35,487 P1084821 INFO Feature specs: {
+ "C1": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 9}",
+ "C14": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 2556}",
+ "C15": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 10}",
+ "C16": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 11}",
+ "C17": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 434}",
+ "C18": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 6}",
+ "C19": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 70}",
+ "C20": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 173}",
+ "C21": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 62}",
+ "app_category": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 33}",
+ "app_domain": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 400}",
+ "app_id": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 6545}",
+ "banner_pos": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 9}",
+ "device_conn_type": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 6}",
+ "device_id": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 820509}",
+ "device_ip": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 2903322}",
+ "device_model": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 7259}",
+ "device_type": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 7}",
+ "hour": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 26}",
+ "site_category": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 27}",
+ "site_domain": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 5461}",
+ "site_id": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 4051}",
+ "weekday": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 9}",
+ "weekend": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 4}"
+}
+2024-02-22 12:55:41,390 P1084821 INFO Total number of parameters: 69046986.
+2024-02-22 12:55:41,391 P1084821 INFO Loading datasets...
+2024-02-22 12:56:01,114 P1084821 INFO Train samples: total/32343172, blocks/1
+2024-02-22 12:56:03,573 P1084821 INFO Validation samples: total/4042897, blocks/1
+2024-02-22 12:56:03,573 P1084821 INFO Loading train and validation data done.
+2024-02-22 12:56:03,574 P1084821 INFO Start training: 3235 batches/epoch
+2024-02-22 12:56:03,574 P1084821 INFO ************ Epoch=1 start ************
+2024-02-22 13:02:12,158 P1084821 INFO Train loss: 0.382539
+2024-02-22 13:02:12,158 P1084821 INFO Evaluation @epoch 1 - batch 3235:
+2024-02-22 13:02:29,486 P1084821 INFO [Metrics] AUC: 0.793356 - logloss: 0.371948
+2024-02-22 13:02:29,489 P1084821 INFO Save best model: monitor(max)=0.421408
+2024-02-22 13:02:30,208 P1084821 INFO ************ Epoch=1 end ************
+2024-02-22 13:08:38,065 P1084821 INFO Train loss: 0.331726
+2024-02-22 13:08:38,065 P1084821 INFO Evaluation @epoch 2 - batch 3235:
+2024-02-22 13:08:55,613 P1084821 INFO [Metrics] AUC: 0.789218 - logloss: 0.380356
+2024-02-22 13:08:55,619 P1084821 INFO Monitor(max)=0.408862 STOP!
+2024-02-22 13:08:55,619 P1084821 INFO Reduce learning rate on plateau: 0.000100
+2024-02-22 13:08:55,619 P1084821 INFO ********* Epoch==2 early stop *********
+2024-02-22 13:08:55,671 P1084821 INFO Training finished.
+2024-02-22 13:08:55,671 P1084821 INFO Load best model: /cache/FuxiCTR/benchmark/checkpoints/avazu_x4_001_a31210da/FinalMLP_avazu_x4_001_006_a7c95fe1.model
+2024-02-22 13:08:55,895 P1084821 INFO ****** Validation evaluation ******
+2024-02-22 13:09:13,147 P1084821 INFO [Metrics] AUC: 0.793356 - logloss: 0.371948
+2024-02-22 13:09:13,251 P1084821 INFO ******** Test evaluation ********
+2024-02-22 13:09:13,251 P1084821 INFO Loading datasets...
+2024-02-22 13:09:15,854 P1084821 INFO Test samples: total/4042898, blocks/1
+2024-02-22 13:09:15,854 P1084821 INFO Loading test data done.
+2024-02-22 13:09:33,591 P1084821 INFO [Metrics] AUC: 0.793537 - logloss: 0.371862
diff --git a/ranking/ctr/FinalMLP/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03.csv b/ranking/ctr/FinalMLP/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03.csv
new file mode 100644
index 00000000..cc42e452
--- /dev/null
+++ b/ranking/ctr/FinalMLP/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03.csv
@@ -0,0 +1,32 @@
+ 20240222-130933,[command] python run_expid.py --config Avazu_x4/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_006_a7c95fe1 --gpu 5,[exp_id] FinalMLP_avazu_x4_001_006_a7c95fe1,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793356 - logloss: 0.371948,[test] AUC: 0.793537 - logloss: 0.371862
+ 20240222-131038,[command] python run_expid.py --config Avazu_x4/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_014_be1abfb2 --gpu 2,[exp_id] FinalMLP_avazu_x4_001_014_be1abfb2,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793199 - logloss: 0.372069,[test] AUC: 0.793337 - logloss: 0.371997
+ 20240222-131243,[command] python run_expid.py --config Avazu_x4/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_016_59f52e22 --gpu 0,[exp_id] FinalMLP_avazu_x4_001_016_59f52e22,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793076 - logloss: 0.372103,[test] AUC: 0.793130 - logloss: 0.372074
+ 20240222-131016,[command] python run_expid.py --config Avazu_x4/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_008_6b2432f6 --gpu 7,[exp_id] FinalMLP_avazu_x4_001_008_6b2432f6,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793038 - logloss: 0.372136,[test] AUC: 0.793103 - logloss: 0.372108
+ 20240222-132443,[command] python run_expid.py --config Avazu_x4/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_026_be1abfb2 --gpu 6,[exp_id] FinalMLP_avazu_x4_001_026_be1abfb2,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792930 - logloss: 0.372119,[test] AUC: 0.793082 - logloss: 0.372044
+ 20240222-133848,[command] python run_expid.py --config Avazu_x4/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_038_be1abfb2 --gpu 6,[exp_id] FinalMLP_avazu_x4_001_038_be1abfb2,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792791 - logloss: 0.372377,[test] AUC: 0.793013 - logloss: 0.372267
+ 20240222-132311,[command] python run_expid.py --config Avazu_x4/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_018_a7c95fe1 --gpu 5,[exp_id] FinalMLP_avazu_x4_001_018_a7c95fe1,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792979 - logloss: 0.372142,[test] AUC: 0.792995 - logloss: 0.372129
+ 20240222-134319,[command] python run_expid.py --config Avazu_x4/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_048_a37ca3a4 --gpu 0,[exp_id] FinalMLP_avazu_x4_001_048_a37ca3a4,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792711 - logloss: 0.372223,[test] AUC: 0.792937 - logloss: 0.372083
+ 20240222-134015,[command] python run_expid.py --config Avazu_x4/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_040_59f52e22 --gpu 4,[exp_id] FinalMLP_avazu_x4_001_040_59f52e22,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792834 - logloss: 0.372171,[test] AUC: 0.792932 - logloss: 0.372107
+ 20240222-135226,[command] python run_expid.py --config Avazu_x4/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_050_be1abfb2 --gpu 3,[exp_id] FinalMLP_avazu_x4_001_050_be1abfb2,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792791 - logloss: 0.372180,[test] AUC: 0.792883 - logloss: 0.372137
+ 20240222-134129,[command] python run_expid.py --config Avazu_x4/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_046_f26688ec --gpu 2,[exp_id] FinalMLP_avazu_x4_001_046_f26688ec,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792600 - logloss: 0.372466,[test] AUC: 0.792756 - logloss: 0.372405
+ 20240222-135516,[command] python run_expid.py --config Avazu_x4/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_058_f26688ec --gpu 5,[exp_id] FinalMLP_avazu_x4_001_058_f26688ec,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792759 - logloss: 0.372223,[test] AUC: 0.792741 - logloss: 0.372280
+ 20240222-132609,[command] python run_expid.py --config Avazu_x4/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_030_c7c4d7b3 --gpu 2,[exp_id] FinalMLP_avazu_x4_001_030_c7c4d7b3,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792571 - logloss: 0.372354,[test] AUC: 0.792722 - logloss: 0.372261
+ 20240222-132442,[command] python run_expid.py --config Avazu_x4/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_020_6b2432f6 --gpu 4,[exp_id] FinalMLP_avazu_x4_001_020_6b2432f6,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792563 - logloss: 0.372535,[test] AUC: 0.792675 - logloss: 0.372509
+ 20240222-132748,[command] python run_expid.py --config Avazu_x4/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_032_be1abfb2 --gpu 0,[exp_id] FinalMLP_avazu_x4_001_032_be1abfb2,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792523 - logloss: 0.372207,[test] AUC: 0.792649 - logloss: 0.372105
+ 20240222-132351,[command] python run_expid.py --config Avazu_x4/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_022_f8853d62 --gpu 3,[exp_id] FinalMLP_avazu_x4_001_022_f8853d62,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792489 - logloss: 0.372269,[test] AUC: 0.792636 - logloss: 0.372192
+ 20240222-135400,[command] python run_expid.py --config Avazu_x4/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_052_59f52e22 --gpu 6,[exp_id] FinalMLP_avazu_x4_001_052_59f52e22,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792460 - logloss: 0.372730,[test] AUC: 0.792630 - logloss: 0.372639
+ 20240222-132511,[command] python run_expid.py --config Avazu_x4/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_024_a7c95fe1 --gpu 1,[exp_id] FinalMLP_avazu_x4_001_024_a7c95fe1,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792361 - logloss: 0.372294,[test] AUC: 0.792582 - logloss: 0.372121
+ 20240222-132555,[command] python run_expid.py --config Avazu_x4/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_028_59f52e22 --gpu 7,[exp_id] FinalMLP_avazu_x4_001_028_59f52e22,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792420 - logloss: 0.372355,[test] AUC: 0.792574 - logloss: 0.372280
+ 20240222-135622,[command] python run_expid.py --config Avazu_x4/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_062_7d43e59a --gpu 1,[exp_id] FinalMLP_avazu_x4_001_062_7d43e59a,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792456 - logloss: 0.372317,[test] AUC: 0.792572 - logloss: 0.372247
+ 20240222-134004,[command] python run_expid.py --config Avazu_x4/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_042_a37ca3a4 --gpu 7,[exp_id] FinalMLP_avazu_x4_001_042_a37ca3a4,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792387 - logloss: 0.372458,[test] AUC: 0.792544 - logloss: 0.372373
+ 20240222-135345,[command] python run_expid.py --config Avazu_x4/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_054_c7c4d7b3 --gpu 7,[exp_id] FinalMLP_avazu_x4_001_054_c7c4d7b3,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792398 - logloss: 0.372381,[test] AUC: 0.792513 - logloss: 0.372320
+ 20240222-135829,[command] python run_expid.py --config Avazu_x4/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_064_f26688ec --gpu 0,[exp_id] FinalMLP_avazu_x4_001_064_f26688ec,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792290 - logloss: 0.372356,[test] AUC: 0.792493 - logloss: 0.372224
+ 20240222-135602,[command] python run_expid.py --config Avazu_x4/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_060_a37ca3a4 --gpu 2,[exp_id] FinalMLP_avazu_x4_001_060_a37ca3a4,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792374 - logloss: 0.372379,[test] AUC: 0.792486 - logloss: 0.372335
+ 20240222-130933,[command] python run_expid.py --config Avazu_x4/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_002_6b2432f6 --gpu 1,[exp_id] FinalMLP_avazu_x4_001_002_6b2432f6,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792351 - logloss: 0.380298,[test] AUC: 0.792474 - logloss: 0.380290
+ 20240222-135447,[command] python run_expid.py --config Avazu_x4/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_056_be1abfb2 --gpu 4,[exp_id] FinalMLP_avazu_x4_001_056_be1abfb2,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792182 - logloss: 0.372398,[test] AUC: 0.792402 - logloss: 0.372242
+ 20240222-131021,[command] python run_expid.py --config Avazu_x4/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_004_db95edba --gpu 3,[exp_id] FinalMLP_avazu_x4_001_004_db95edba,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792165 - logloss: 0.380272,[test] AUC: 0.792387 - logloss: 0.380193
+ 20240222-131010,[command] python run_expid.py --config Avazu_x4/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_010_59f52e22 --gpu 4,[exp_id] FinalMLP_avazu_x4_001_010_59f52e22,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792140 - logloss: 0.380482,[test] AUC: 0.792358 - logloss: 0.380424
+ 20240222-133803,[command] python run_expid.py --config Avazu_x4/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_034_59f52e22 --gpu 3,[exp_id] FinalMLP_avazu_x4_001_034_59f52e22,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792096 - logloss: 0.381319,[test] AUC: 0.792166 - logloss: 0.381356
+ 20240222-131101,[command] python run_expid.py --config Avazu_x4/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_012_e59ed2b2 --gpu 6,[exp_id] FinalMLP_avazu_x4_001_012_e59ed2b2,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.791830 - logloss: 0.380470,[test] AUC: 0.791903 - logloss: 0.380480
+ 20240222-134128,[command] python run_expid.py --config Avazu_x4/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_044_e0d6c4aa --gpu 1,[exp_id] FinalMLP_avazu_x4_001_044_e0d6c4aa,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.791680 - logloss: 0.380597,[test] AUC: 0.791805 - logloss: 0.380571
+ 20240222-133930,[command] python run_expid.py --config Avazu_x4/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_036_e59ed2b2 --gpu 5,[exp_id] FinalMLP_avazu_x4_001_036_e59ed2b2,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.752105 - logloss: 0.966729,[test] AUC: 0.752173 - logloss: 0.967141
diff --git a/ranking/ctr/FinalMLP/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03.yaml b/ranking/ctr/FinalMLP/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03.yaml
new file mode 100644
index 00000000..ccf19411
--- /dev/null
+++ b/ranking/ctr/FinalMLP/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03.yaml
@@ -0,0 +1,44 @@
+base_config: ../model_zoo/FinalMLP/config/
+base_expid: FinalMLP_default
+dataset_id: avazu_x4_001
+
+dataset_config:
+ avazu_x4_001:
+ data_root: ../data/Avazu/
+ data_format: csv
+ train_data: ../data/Avazu/Avazu_x4/train.csv
+ valid_data: ../data/Avazu/Avazu_x4/valid.csv
+ test_data: ../data/Avazu/Avazu_x4/test.csv
+ min_categr_count: 2
+ feature_cols:
+ - {name: id, active: False, dtype: str, type: categorical}
+ - {name: hour, active: True, dtype: str, type: categorical, preprocess: convert_hour}
+ - {name: [C1,banner_pos,site_id,site_domain,site_category,app_id,app_domain,app_category,device_id,
+ device_ip,device_model,device_type,device_conn_type,C14,C15,C16,C17,C18,C19,C20,C21],
+ active: True, dtype: str, type: categorical}
+ - {name: weekday, active: True, dtype: str, type: categorical, preprocess: convert_weekday}
+ - {name: weekend, active: True, dtype: str, type: categorical, preprocess: convert_weekend}
+ label_col: {name: click, dtype: float}
+
+
+tuner_space:
+ model_root: './checkpoints/'
+ embedding_dim: 16
+ embedding_regularizer: 1.e-9
+ mlp1_batch_norm: [True, False]
+ mlp2_batch_norm: [True, False]
+ mlp1_hidden_units: [[2000, 2000, 2000]]
+ mlp2_hidden_units: [[500]]
+ mlp1_dropout: [0, 0.1]
+ mlp2_dropout: [0, 0.1]
+ use_fs: [False, True]
+ fs_hidden_units: [[1000]]
+ fs1_context: [[]]
+ fs2_context: [[]]
+ num_heads: [1, 20]
+ learning_rate: 1.e-3
+ batch_size: 10000
+ seed: 2019
+ monitor: {'AUC': 1, 'logloss': -1}
+ metrics: [['AUC', 'logloss']]
+ early_stop_patience: 1
diff --git a/ranking/ctr/FinalMLP/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03/dataset_config.yaml b/ranking/ctr/FinalMLP/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03/dataset_config.yaml
new file mode 100644
index 00000000..0de8b62a
--- /dev/null
+++ b/ranking/ctr/FinalMLP/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03/dataset_config.yaml
@@ -0,0 +1,19 @@
+avazu_x4_001_a31210da:
+ data_format: csv
+ data_root: ../data/Avazu/
+ feature_cols:
+ - {active: false, dtype: str, name: id, type: categorical}
+ - {active: true, dtype: str, name: hour, preprocess: convert_hour, type: categorical}
+ - active: true
+ dtype: str
+ name: [C1, banner_pos, site_id, site_domain, site_category, app_id, app_domain,
+ app_category, device_id, device_ip, device_model, device_type, device_conn_type,
+ C14, C15, C16, C17, C18, C19, C20, C21]
+ type: categorical
+ - {active: true, dtype: str, name: weekday, preprocess: convert_weekday, type: categorical}
+ - {active: true, dtype: str, name: weekend, preprocess: convert_weekend, type: categorical}
+ label_col: {dtype: float, name: click}
+ min_categr_count: 2
+ test_data: ../data/Avazu/Avazu_x4/test.csv
+ train_data: ../data/Avazu/Avazu_x4/train.csv
+ valid_data: ../data/Avazu/Avazu_x4/valid.csv
diff --git a/ranking/ctr/FinalMLP/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03/model_config.yaml b/ranking/ctr/FinalMLP/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03/model_config.yaml
new file mode 100644
index 00000000..f88847be
--- /dev/null
+++ b/ranking/ctr/FinalMLP/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03/model_config.yaml
@@ -0,0 +1,2688 @@
+FinalMLP_avazu_x4_001_001_1d65657a:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: true
+ mlp1_dropout: 0
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: true
+ mlp2_dropout: 0
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 1
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_avazu_x4_001_002_6b2432f6:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: true
+ mlp1_dropout: 0
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: true
+ mlp2_dropout: 0
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 20
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_avazu_x4_001_003_7631d099:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: true
+ mlp1_dropout: 0
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: true
+ mlp2_dropout: 0
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 1
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: true
+ verbose: 1
+FinalMLP_avazu_x4_001_004_db95edba:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: true
+ mlp1_dropout: 0
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: true
+ mlp2_dropout: 0
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 20
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: true
+ verbose: 1
+FinalMLP_avazu_x4_001_005_c43a840b:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: true
+ mlp1_dropout: 0
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 1
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_avazu_x4_001_006_a7c95fe1:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: true
+ mlp1_dropout: 0
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 20
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_avazu_x4_001_007_1d65657a:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: true
+ mlp1_dropout: 0
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 1
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: true
+ verbose: 1
+FinalMLP_avazu_x4_001_008_6b2432f6:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: true
+ mlp1_dropout: 0
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 20
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: true
+ verbose: 1
+FinalMLP_avazu_x4_001_009_0a559054:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: true
+ mlp1_dropout: 0
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: true
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 1
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_avazu_x4_001_010_59f52e22:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: true
+ mlp1_dropout: 0
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: true
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 20
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_avazu_x4_001_011_b5298b00:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: true
+ mlp1_dropout: 0
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: true
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 1
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: true
+ verbose: 1
+FinalMLP_avazu_x4_001_012_e59ed2b2:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: true
+ mlp1_dropout: 0
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: true
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 20
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: true
+ verbose: 1
+FinalMLP_avazu_x4_001_013_a56839db:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: true
+ mlp1_dropout: 0
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 1
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_avazu_x4_001_014_be1abfb2:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: true
+ mlp1_dropout: 0
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 20
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_avazu_x4_001_015_0a559054:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: true
+ mlp1_dropout: 0
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 1
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: true
+ verbose: 1
+FinalMLP_avazu_x4_001_016_59f52e22:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: true
+ mlp1_dropout: 0
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 20
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: true
+ verbose: 1
+FinalMLP_avazu_x4_001_017_c43a840b:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: true
+ mlp2_dropout: 0
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 1
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_avazu_x4_001_018_a7c95fe1:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: true
+ mlp2_dropout: 0
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 20
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_avazu_x4_001_019_1d65657a:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: true
+ mlp2_dropout: 0
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 1
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: true
+ verbose: 1
+FinalMLP_avazu_x4_001_020_6b2432f6:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: true
+ mlp2_dropout: 0
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 20
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: true
+ verbose: 1
+FinalMLP_avazu_x4_001_021_86251649:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 1
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_avazu_x4_001_022_f8853d62:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 20
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_avazu_x4_001_023_c43a840b:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 1
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: true
+ verbose: 1
+FinalMLP_avazu_x4_001_024_a7c95fe1:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 20
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: true
+ verbose: 1
+FinalMLP_avazu_x4_001_025_a56839db:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: true
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 1
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_avazu_x4_001_026_be1abfb2:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: true
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 20
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_avazu_x4_001_027_0a559054:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: true
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 1
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: true
+ verbose: 1
+FinalMLP_avazu_x4_001_028_59f52e22:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: true
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 20
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: true
+ verbose: 1
+FinalMLP_avazu_x4_001_029_076ab2bb:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 1
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_avazu_x4_001_030_c7c4d7b3:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 20
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_avazu_x4_001_031_a56839db:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 1
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: true
+ verbose: 1
+FinalMLP_avazu_x4_001_032_be1abfb2:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 20
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: true
+ verbose: 1
+FinalMLP_avazu_x4_001_033_0a559054:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: true
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: true
+ mlp2_dropout: 0
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 1
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_avazu_x4_001_034_59f52e22:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: true
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: true
+ mlp2_dropout: 0
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 20
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_avazu_x4_001_035_b5298b00:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: true
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: true
+ mlp2_dropout: 0
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 1
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: true
+ verbose: 1
+FinalMLP_avazu_x4_001_036_e59ed2b2:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: true
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: true
+ mlp2_dropout: 0
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 20
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: true
+ verbose: 1
+FinalMLP_avazu_x4_001_037_a56839db:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: true
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 1
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_avazu_x4_001_038_be1abfb2:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: true
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 20
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_avazu_x4_001_039_0a559054:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: true
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 1
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: true
+ verbose: 1
+FinalMLP_avazu_x4_001_040_59f52e22:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: true
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 20
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: true
+ verbose: 1
+FinalMLP_avazu_x4_001_041_9ff2072f:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: true
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: true
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 1
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_avazu_x4_001_042_a37ca3a4:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: true
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: true
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 20
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_avazu_x4_001_043_808f60ad:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: true
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: true
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 1
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: true
+ verbose: 1
+FinalMLP_avazu_x4_001_044_e0d6c4aa:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: true
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: true
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 20
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: true
+ verbose: 1
+FinalMLP_avazu_x4_001_045_f743c576:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: true
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 1
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_avazu_x4_001_046_f26688ec:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: true
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 20
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_avazu_x4_001_047_9ff2072f:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: true
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 1
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: true
+ verbose: 1
+FinalMLP_avazu_x4_001_048_a37ca3a4:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: true
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 20
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: true
+ verbose: 1
+FinalMLP_avazu_x4_001_049_a56839db:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: true
+ mlp2_dropout: 0
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 1
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_avazu_x4_001_050_be1abfb2:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: true
+ mlp2_dropout: 0
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 20
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_avazu_x4_001_051_0a559054:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: true
+ mlp2_dropout: 0
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 1
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: true
+ verbose: 1
+FinalMLP_avazu_x4_001_052_59f52e22:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: true
+ mlp2_dropout: 0
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 20
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: true
+ verbose: 1
+FinalMLP_avazu_x4_001_053_076ab2bb:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 1
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_avazu_x4_001_054_c7c4d7b3:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 20
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_avazu_x4_001_055_a56839db:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 1
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: true
+ verbose: 1
+FinalMLP_avazu_x4_001_056_be1abfb2:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 20
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: true
+ verbose: 1
+FinalMLP_avazu_x4_001_057_f743c576:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: true
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 1
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_avazu_x4_001_058_f26688ec:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: true
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 20
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_avazu_x4_001_059_9ff2072f:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: true
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 1
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: true
+ verbose: 1
+FinalMLP_avazu_x4_001_060_a37ca3a4:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: true
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 20
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: true
+ verbose: 1
+FinalMLP_avazu_x4_001_061_b6808714:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 1
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_avazu_x4_001_062_7d43e59a:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 20
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_avazu_x4_001_063_f743c576:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 1
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: true
+ verbose: 1
+FinalMLP_avazu_x4_001_064_f26688ec:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [2000, 2000, 2000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 20
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: true
+ verbose: 1
diff --git a/ranking/ctr/FinalMLP/FinalMLP_avazu_x4_001/README.md b/ranking/ctr/FinalMLP/FinalMLP_avazu_x4_001/README.md
new file mode 100644
index 00000000..dc3558a7
--- /dev/null
+++ b/ranking/ctr/FinalMLP/FinalMLP_avazu_x4_001/README.md
@@ -0,0 +1,186 @@
+## FinalMLP_avazu_x4_001
+
+A hands-on guide to run the FinalMLP model on the Avazu_x4 dataset.
+
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
+
+
+| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
+|:-----------------------------:|:-----------:|:--------:|:--------:|-------|
+### Environments
++ Hardware
+
+ ```python
+ CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
+ GPU: Tesla V100 32G
+ RAM: 755G
+
+ ```
+
++ Software
+
+ ```python
+ CUDA: 10.2
+ python: 3.7.10
+ pytorch: 1.10.2+cu102
+ pandas: 1.1.5
+ numpy: 1.19.5
+ scipy: 1.5.2
+ sklearn: 0.22.1
+ pyyaml: 6.0.1
+ h5py: 2.8.0
+ tqdm: 4.64.0
+ keras_preprocessing: 1.1.2
+ fuxictr: 2.2.0
+
+ ```
+
+### Dataset
+Please refer to [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4) to get the dataset details.
+
+### Code
+
+We use the [FinalMLP](https://github.com/reczoo/FuxiCTR/tree/v2.2.0/model_zoo/FinalMLP) model code from [FuxiCTR-v2.2.0](https://github.com/reczoo/FuxiCTR/tree/v2.2.0) for this experiment.
+
+Running steps:
+
+1. Download [FuxiCTR-v2.2.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.2.0.zip) and install all the dependencies listed in the [environments](#environments).
+
+ ```bash
+ pip uninstall fuxictr
+ pip install fuxictr==2.2.0
+ ```
+
+2. Create a data directory and put the downloaded data files in `../data/Avazu/Avazu_x4`.
+
+3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [FinalMLP_avazu_x4_tuner_config_03](./FinalMLP_avazu_x4_tuner_config_03). Please make sure that the data paths in `dataset_config.yaml` are correctly set.
+
+4. Run the following script to start training and evaluation.
+
+ ```bash
+ cd FuxiCTR/model_zoo/FinalMLP
+ nohup python run_expid.py --config YOUR_PATH/FinalMLP/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_006_a7c95fe1 --gpu 0 > run.log &
+ tail -f run.log
+ ```
+
+### Results
+
+| AUC | logloss |
+|:--------------------:|:--------------------:|
+| 0.793537 | 0.371862 |
+
+
+### Logs
+```python
+2024-02-22 12:55:35,485 P1084821 INFO Params: {
+ "batch_size": "10000",
+ "data_format": "csv",
+ "data_root": "../data/Avazu/",
+ "dataset_id": "avazu_x4_001_a31210da",
+ "debug_mode": "False",
+ "early_stop_patience": "1",
+ "embedding_dim": "16",
+ "embedding_regularizer": "1e-09",
+ "epochs": "100",
+ "eval_steps": "None",
+ "feature_cols": "[{'active': False, 'dtype': 'str', 'name': 'id', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': 'hour', 'preprocess': 'convert_hour', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': ['C1', 'banner_pos', 'site_id', 'site_domain', 'site_category', 'app_id', 'app_domain', 'app_category', 'device_id', 'device_ip', 'device_model', 'device_type', 'device_conn_type', 'C14', 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21'], 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': 'weekday', 'preprocess': 'convert_weekday', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': 'weekend', 'preprocess': 'convert_weekend', 'type': 'categorical'}]",
+ "feature_config": "None",
+ "feature_specs": "None",
+ "fs1_context": "[]",
+ "fs2_context": "[]",
+ "fs_hidden_units": "[1000]",
+ "gpu": "5",
+ "group_id": "None",
+ "label_col": "{'dtype': 'float', 'name': 'click'}",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "metrics": "['AUC', 'logloss']",
+ "min_categr_count": "2",
+ "mlp1_batch_norm": "True",
+ "mlp1_dropout": "0",
+ "mlp1_hidden_activations": "relu",
+ "mlp1_hidden_units": "[2000, 2000, 2000]",
+ "mlp2_batch_norm": "False",
+ "mlp2_dropout": "0",
+ "mlp2_hidden_activations": "relu",
+ "mlp2_hidden_units": "[500]",
+ "model": "FinalMLP",
+ "model_id": "FinalMLP_avazu_x4_001_006_a7c95fe1",
+ "model_root": "./checkpoints/",
+ "monitor": "{'AUC': 1, 'logloss': -1}",
+ "monitor_mode": "max",
+ "net_regularizer": "0",
+ "num_heads": "20",
+ "num_workers": "3",
+ "optimizer": "adam",
+ "pickle_feature_encoder": "True",
+ "save_best_only": "True",
+ "seed": "2019",
+ "shuffle": "True",
+ "task": "binary_classification",
+ "test_data": "../data/Avazu/Avazu_x4/test.csv",
+ "train_data": "../data/Avazu/Avazu_x4/train.csv",
+ "use_features": "None",
+ "use_fs": "False",
+ "valid_data": "../data/Avazu/Avazu_x4/valid.csv",
+ "verbose": "1"
+}
+2024-02-22 12:55:35,486 P1084821 INFO Set up feature processor...
+2024-02-22 12:55:35,486 P1084821 WARNING Skip rebuilding ../data/Avazu/avazu_x4_001_a31210da/feature_map.json. Please delete it manually if rebuilding is required.
+2024-02-22 12:55:35,487 P1084821 INFO Load feature_map from json: ../data/Avazu/avazu_x4_001_a31210da/feature_map.json
+2024-02-22 12:55:35,487 P1084821 INFO Set column index...
+2024-02-22 12:55:35,487 P1084821 INFO Feature specs: {
+ "C1": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 9}",
+ "C14": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 2556}",
+ "C15": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 10}",
+ "C16": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 11}",
+ "C17": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 434}",
+ "C18": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 6}",
+ "C19": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 70}",
+ "C20": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 173}",
+ "C21": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 62}",
+ "app_category": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 33}",
+ "app_domain": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 400}",
+ "app_id": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 6545}",
+ "banner_pos": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 9}",
+ "device_conn_type": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 6}",
+ "device_id": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 820509}",
+ "device_ip": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 2903322}",
+ "device_model": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 7259}",
+ "device_type": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 7}",
+ "hour": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 26}",
+ "site_category": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 27}",
+ "site_domain": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 5461}",
+ "site_id": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 4051}",
+ "weekday": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 9}",
+ "weekend": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 4}"
+}
+2024-02-22 12:55:41,390 P1084821 INFO Total number of parameters: 69046986.
+2024-02-22 12:55:41,391 P1084821 INFO Loading datasets...
+2024-02-22 12:56:01,114 P1084821 INFO Train samples: total/32343172, blocks/1
+2024-02-22 12:56:03,573 P1084821 INFO Validation samples: total/4042897, blocks/1
+2024-02-22 12:56:03,573 P1084821 INFO Loading train and validation data done.
+2024-02-22 12:56:03,574 P1084821 INFO Start training: 3235 batches/epoch
+2024-02-22 12:56:03,574 P1084821 INFO ************ Epoch=1 start ************
+2024-02-22 13:02:12,158 P1084821 INFO Train loss: 0.382539
+2024-02-22 13:02:12,158 P1084821 INFO Evaluation @epoch 1 - batch 3235:
+2024-02-22 13:02:29,486 P1084821 INFO [Metrics] AUC: 0.793356 - logloss: 0.371948
+2024-02-22 13:02:29,489 P1084821 INFO Save best model: monitor(max)=0.421408
+2024-02-22 13:02:30,208 P1084821 INFO ************ Epoch=1 end ************
+2024-02-22 13:08:38,065 P1084821 INFO Train loss: 0.331726
+2024-02-22 13:08:38,065 P1084821 INFO Evaluation @epoch 2 - batch 3235:
+2024-02-22 13:08:55,613 P1084821 INFO [Metrics] AUC: 0.789218 - logloss: 0.380356
+2024-02-22 13:08:55,619 P1084821 INFO Monitor(max)=0.408862 STOP!
+2024-02-22 13:08:55,619 P1084821 INFO Reduce learning rate on plateau: 0.000100
+2024-02-22 13:08:55,619 P1084821 INFO ********* Epoch==2 early stop *********
+2024-02-22 13:08:55,671 P1084821 INFO Training finished.
+2024-02-22 13:08:55,671 P1084821 INFO Load best model: /cache/FuxiCTR/benchmark/checkpoints/avazu_x4_001_a31210da/FinalMLP_avazu_x4_001_006_a7c95fe1.model
+2024-02-22 13:08:55,895 P1084821 INFO ****** Validation evaluation ******
+2024-02-22 13:09:13,147 P1084821 INFO [Metrics] AUC: 0.793356 - logloss: 0.371948
+2024-02-22 13:09:13,251 P1084821 INFO ******** Test evaluation ********
+2024-02-22 13:09:13,251 P1084821 INFO Loading datasets...
+2024-02-22 13:09:15,854 P1084821 INFO Test samples: total/4042898, blocks/1
+2024-02-22 13:09:15,854 P1084821 INFO Loading test data done.
+2024-02-22 13:09:33,591 P1084821 INFO [Metrics] AUC: 0.793537 - logloss: 0.371862
+
+```
diff --git a/ranking/ctr/FinalMLP/FinalMLP_avazu_x4_001/environments.txt b/ranking/ctr/FinalMLP/FinalMLP_avazu_x4_001/environments.txt
new file mode 100644
index 00000000..5415575c
--- /dev/null
+++ b/ranking/ctr/FinalMLP/FinalMLP_avazu_x4_001/environments.txt
@@ -0,0 +1,18 @@
+[Hardware]
+CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
+GPU: Tesla V100 32G
+RAM: 755G
+
+[Software]
+CUDA: 10.2
+python: 3.7.10
+pytorch: 1.10.2+cu102
+pandas: 1.1.5
+numpy: 1.19.5
+scipy: 1.5.2
+sklearn: 0.22.1
+pyyaml: 6.0.1
+h5py: 2.8.0
+tqdm: 4.64.0
+keras_preprocessing: 1.1.2
+fuxictr: 2.2.0
diff --git a/ranking/ctr/FinalMLP/FinalMLP_avazu_x4_001/results.csv b/ranking/ctr/FinalMLP/FinalMLP_avazu_x4_001/results.csv
new file mode 100644
index 00000000..c2e923ce
--- /dev/null
+++ b/ranking/ctr/FinalMLP/FinalMLP_avazu_x4_001/results.csv
@@ -0,0 +1 @@
+ 20240222-130933,[command] python run_expid.py --config Avazu_x4/FinalMLP_avazu_x4_001/FinalMLP_avazu_x4_tuner_config_03 --expid FinalMLP_avazu_x4_001_006_a7c95fe1 --gpu 5,[exp_id] FinalMLP_avazu_x4_001_006_a7c95fe1,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793356 - logloss: 0.371948,[test] AUC: 0.793537 - logloss: 0.371862
diff --git a/ranking/ctr/FinalMLP/FinalMLP_criteo_x1/README.md b/ranking/ctr/FinalMLP/FinalMLP_criteo_x1/README.md
index 8e149d39..4ebf3b5c 100644
--- a/ranking/ctr/FinalMLP/FinalMLP_criteo_x1/README.md
+++ b/ranking/ctr/FinalMLP/FinalMLP_criteo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FinalMLP model on the Criteo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -39,11 +39,11 @@ Please refer to the BARS dataset [Criteo_x1](https://github.com/openbenchmark/BA
### Code
-We use the [FinalMLP](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/FinalMLP) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [FinalMLP](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/FinalMLP) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/FinalMLP/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_001_002_53a37ddd.log b/ranking/ctr/FinalMLP/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_001_002_53a37ddd.log
new file mode 100644
index 00000000..65e2dfea
--- /dev/null
+++ b/ranking/ctr/FinalMLP/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_001_002_53a37ddd.log
@@ -0,0 +1,172 @@
+2024-02-20 19:33:45,916 P3047876 INFO Params: {
+ "batch_size": "10000",
+ "data_format": "csv",
+ "data_root": "../data/Criteo/",
+ "dataset_id": "criteo_x4_001_a5e05ce7",
+ "debug_mode": "False",
+ "early_stop_patience": "2",
+ "embedding_dim": "16",
+ "embedding_regularizer": "1e-05",
+ "epochs": "100",
+ "eval_steps": "None",
+ "feature_cols": "[{'active': True, 'dtype': 'float', 'fill_na': 0, 'name': ['I1', 'I2', 'I3', 'I4', 'I5', 'I6', 'I7', 'I8', 'I9', 'I10', 'I11', 'I12', 'I13'], 'preprocess': 'convert_to_bucket', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'fill_na': '', 'name': ['C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10', 'C11', 'C12', 'C13', 'C14', 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21', 'C22', 'C23', 'C24', 'C25', 'C26'], 'type': 'categorical'}]",
+ "feature_config": "None",
+ "feature_specs": "None",
+ "fs1_context": "[]",
+ "fs2_context": "[]",
+ "fs_hidden_units": "[1000]",
+ "gpu": "1",
+ "group_id": "None",
+ "label_col": "{'dtype': 'float', 'name': 'Label'}",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "metrics": "['AUC', 'logloss']",
+ "min_categr_count": "10",
+ "mlp1_batch_norm": "False",
+ "mlp1_dropout": "0.2",
+ "mlp1_hidden_activations": "relu",
+ "mlp1_hidden_units": "[1000, 1000, 1000, 1000, 1000]",
+ "mlp2_batch_norm": "False",
+ "mlp2_dropout": "0.1",
+ "mlp2_hidden_activations": "relu",
+ "mlp2_hidden_units": "[500]",
+ "model": "FinalMLP",
+ "model_id": "FinalMLP_criteo_x4_001_002_53a37ddd",
+ "model_root": "./checkpoints/",
+ "monitor": "AUC",
+ "monitor_mode": "max",
+ "net_regularizer": "0",
+ "num_heads": "50",
+ "num_workers": "3",
+ "optimizer": "adam",
+ "pickle_feature_encoder": "True",
+ "save_best_only": "True",
+ "seed": "2019",
+ "shuffle": "True",
+ "task": "binary_classification",
+ "test_data": "../data/Criteo/Criteo_x4/test.csv",
+ "train_data": "../data/Criteo/Criteo_x4/train.csv",
+ "use_features": "None",
+ "use_fs": "False",
+ "valid_data": "../data/Criteo/Criteo_x4/valid.csv",
+ "verbose": "1"
+}
+2024-02-20 19:33:45,917 P3047876 INFO Set up feature processor...
+2024-02-20 19:33:45,917 P3047876 WARNING Skip rebuilding ../data/Criteo/criteo_x4_001_a5e05ce7/feature_map.json. Please delete it manually if rebuilding is required.
+2024-02-20 19:33:45,917 P3047876 INFO Load feature_map from json: ../data/Criteo/criteo_x4_001_a5e05ce7/feature_map.json
+2024-02-20 19:33:45,917 P3047876 INFO Set column index...
+2024-02-20 19:33:45,918 P3047876 INFO Feature specs: {
+ "C1": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 1446}",
+ "C10": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 39530}",
+ "C11": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 5131}",
+ "C12": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 156656}",
+ "C13": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 3176}",
+ "C14": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 28}",
+ "C15": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 11043}",
+ "C16": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 148913}",
+ "C17": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 12}",
+ "C18": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 4560}",
+ "C19": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 2003}",
+ "C2": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 554}",
+ "C20": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 5}",
+ "C21": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 154564}",
+ "C22": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 18}",
+ "C23": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 17}",
+ "C24": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 53031}",
+ "C25": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 82}",
+ "C26": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 40955}",
+ "C3": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 157339}",
+ "C4": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 117822}",
+ "C5": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 306}",
+ "C6": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 18}",
+ "C7": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 11882}",
+ "C8": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 630}",
+ "C9": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 5}",
+ "I1": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 44}",
+ "I10": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 6}",
+ "I11": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 27}",
+ "I12": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 37}",
+ "I13": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 72}",
+ "I2": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 99}",
+ "I3": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 122}",
+ "I4": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 41}",
+ "I5": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 220}",
+ "I6": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 112}",
+ "I7": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 80}",
+ "I8": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 69}",
+ "I9": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 92}"
+}
+2024-02-20 19:33:51,214 P3047876 INFO Total number of parameters: 19524954.
+2024-02-20 19:33:51,215 P3047876 INFO Loading datasets...
+2024-02-20 19:34:26,891 P3047876 INFO Train samples: total/36672493, blocks/1
+2024-02-20 19:34:31,381 P3047876 INFO Validation samples: total/4584062, blocks/1
+2024-02-20 19:34:31,381 P3047876 INFO Loading train and validation data done.
+2024-02-20 19:34:31,381 P3047876 INFO Start training: 3668 batches/epoch
+2024-02-20 19:34:31,381 P3047876 INFO ************ Epoch=1 start ************
+2024-02-20 19:39:47,688 P3047876 INFO Train loss: 0.459406
+2024-02-20 19:39:47,688 P3047876 INFO Evaluation @epoch 1 - batch 3668:
+2024-02-20 19:40:02,818 P3047876 INFO [Metrics] AUC: 0.805111
+2024-02-20 19:40:02,820 P3047876 INFO Save best model: monitor(max)=0.805111
+2024-02-20 19:40:02,986 P3047876 INFO ************ Epoch=1 end ************
+2024-02-20 19:45:18,862 P3047876 INFO Train loss: 0.453592
+2024-02-20 19:45:18,863 P3047876 INFO Evaluation @epoch 2 - batch 3668:
+2024-02-20 19:45:34,638 P3047876 INFO [Metrics] AUC: 0.807930
+2024-02-20 19:45:34,639 P3047876 INFO Save best model: monitor(max)=0.807930
+2024-02-20 19:45:34,827 P3047876 INFO ************ Epoch=2 end ************
+2024-02-20 19:50:50,813 P3047876 INFO Train loss: 0.451762
+2024-02-20 19:50:50,813 P3047876 INFO Evaluation @epoch 3 - batch 3668:
+2024-02-20 19:51:05,790 P3047876 INFO [Metrics] AUC: 0.809258
+2024-02-20 19:51:05,791 P3047876 INFO Save best model: monitor(max)=0.809258
+2024-02-20 19:51:05,964 P3047876 INFO ************ Epoch=3 end ************
+2024-02-20 19:56:22,317 P3047876 INFO Train loss: 0.450756
+2024-02-20 19:56:22,318 P3047876 INFO Evaluation @epoch 4 - batch 3668:
+2024-02-20 19:56:37,561 P3047876 INFO [Metrics] AUC: 0.810058
+2024-02-20 19:56:37,565 P3047876 INFO Save best model: monitor(max)=0.810058
+2024-02-20 19:56:37,724 P3047876 INFO ************ Epoch=4 end ************
+2024-02-20 20:01:54,218 P3047876 INFO Train loss: 0.450101
+2024-02-20 20:01:54,219 P3047876 INFO Evaluation @epoch 5 - batch 3668:
+2024-02-20 20:02:08,996 P3047876 INFO [Metrics] AUC: 0.810280
+2024-02-20 20:02:08,997 P3047876 INFO Save best model: monitor(max)=0.810280
+2024-02-20 20:02:09,149 P3047876 INFO ************ Epoch=5 end ************
+2024-02-20 20:07:26,052 P3047876 INFO Train loss: 0.449591
+2024-02-20 20:07:26,052 P3047876 INFO Evaluation @epoch 6 - batch 3668:
+2024-02-20 20:07:40,973 P3047876 INFO [Metrics] AUC: 0.810754
+2024-02-20 20:07:40,973 P3047876 INFO Save best model: monitor(max)=0.810754
+2024-02-20 20:07:41,129 P3047876 INFO ************ Epoch=6 end ************
+2024-02-20 20:12:57,195 P3047876 INFO Train loss: 0.449185
+2024-02-20 20:12:57,196 P3047876 INFO Evaluation @epoch 7 - batch 3668:
+2024-02-20 20:13:12,692 P3047876 INFO [Metrics] AUC: 0.810747
+2024-02-20 20:13:12,693 P3047876 INFO Monitor(max)=0.810747 STOP!
+2024-02-20 20:13:12,693 P3047876 INFO Reduce learning rate on plateau: 0.000100
+2024-02-20 20:13:12,735 P3047876 INFO ************ Epoch=7 end ************
+2024-02-20 20:18:29,150 P3047876 INFO Train loss: 0.438594
+2024-02-20 20:18:29,150 P3047876 INFO Evaluation @epoch 8 - batch 3668:
+2024-02-20 20:18:43,989 P3047876 INFO [Metrics] AUC: 0.813723
+2024-02-20 20:18:43,990 P3047876 INFO Save best model: monitor(max)=0.813723
+2024-02-20 20:18:44,143 P3047876 INFO ************ Epoch=8 end ************
+2024-02-20 20:24:00,255 P3047876 INFO Train loss: 0.434399
+2024-02-20 20:24:00,255 P3047876 INFO Evaluation @epoch 9 - batch 3668:
+2024-02-20 20:24:15,936 P3047876 INFO [Metrics] AUC: 0.814101
+2024-02-20 20:24:15,940 P3047876 INFO Save best model: monitor(max)=0.814101
+2024-02-20 20:24:16,100 P3047876 INFO ************ Epoch=9 end ************
+2024-02-20 20:29:32,241 P3047876 INFO Train loss: 0.432275
+2024-02-20 20:29:32,242 P3047876 INFO Evaluation @epoch 10 - batch 3668:
+2024-02-20 20:29:47,828 P3047876 INFO [Metrics] AUC: 0.814036
+2024-02-20 20:29:47,829 P3047876 INFO Monitor(max)=0.814036 STOP!
+2024-02-20 20:29:47,830 P3047876 INFO Reduce learning rate on plateau: 0.000010
+2024-02-20 20:29:47,882 P3047876 INFO ************ Epoch=10 end ************
+2024-02-20 20:35:04,217 P3047876 INFO Train loss: 0.428519
+2024-02-20 20:35:04,217 P3047876 INFO Evaluation @epoch 11 - batch 3668:
+2024-02-20 20:35:19,473 P3047876 INFO [Metrics] AUC: 0.813797
+2024-02-20 20:35:19,474 P3047876 INFO Monitor(max)=0.813797 STOP!
+2024-02-20 20:35:19,474 P3047876 INFO Reduce learning rate on plateau: 0.000001
+2024-02-20 20:35:19,475 P3047876 INFO ********* Epoch==11 early stop *********
+2024-02-20 20:35:19,518 P3047876 INFO Training finished.
+2024-02-20 20:35:19,518 P3047876 INFO Load best model: /cache/FuxiCTR/benchmark/checkpoints/criteo_x4_001_a5e05ce7/FinalMLP_criteo_x4_001_002_53a37ddd.model
+2024-02-20 20:35:19,600 P3047876 INFO ****** Validation evaluation ******
+2024-02-20 20:35:36,784 P3047876 INFO [Metrics] AUC: 0.814101 - logloss: 0.437752
+2024-02-20 20:35:36,900 P3047876 INFO ******** Test evaluation ********
+2024-02-20 20:35:36,901 P3047876 INFO Loading datasets...
+2024-02-20 20:35:41,978 P3047876 INFO Test samples: total/4584062, blocks/1
+2024-02-20 20:35:41,978 P3047876 INFO Loading test data done.
+2024-02-20 20:35:58,674 P3047876 INFO [Metrics] AUC: 0.814584 - logloss: 0.437353
diff --git a/ranking/ctr/FinalMLP/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03.csv b/ranking/ctr/FinalMLP/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03.csv
new file mode 100644
index 00000000..90ed1171
--- /dev/null
+++ b/ranking/ctr/FinalMLP/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03.csv
@@ -0,0 +1,48 @@
+ 20240220-203558,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_002_53a37ddd --gpu 1,[exp_id] FinalMLP_criteo_x4_001_002_53a37ddd,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814101 - logloss: 0.437752,[test] AUC: 0.814584 - logloss: 0.437353
+ 20240220-204250,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_006_cfd9d39e --gpu 5,[exp_id] FinalMLP_criteo_x4_001_006_cfd9d39e,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814132 - logloss: 0.437882,[test] AUC: 0.814506 - logloss: 0.437551
+ 20240220-205510,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_001_d6056bd9 --gpu 0,[exp_id] FinalMLP_criteo_x4_001_001_d6056bd9,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813974 - logloss: 0.437866,[test] AUC: 0.814457 - logloss: 0.437451
+ 20240221-001122,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_026_8bc69af6 --gpu 2,[exp_id] FinalMLP_criteo_x4_001_026_8bc69af6,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813877 - logloss: 0.438156,[test] AUC: 0.814363 - logloss: 0.437755
+ 20240220-210405,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_003_308b21a7 --gpu 2,[exp_id] FinalMLP_criteo_x4_001_003_308b21a7,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813814 - logloss: 0.438044,[test] AUC: 0.814248 - logloss: 0.437687
+ 20240220-215854,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_009_2bb21848 --gpu 1,[exp_id] FinalMLP_criteo_x4_001_009_2bb21848,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813802 - logloss: 0.438251,[test] AUC: 0.814247 - logloss: 0.437859
+ 20240220-205707,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_005_308b21a7 --gpu 4,[exp_id] FinalMLP_criteo_x4_001_005_308b21a7,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813836 - logloss: 0.438144,[test] AUC: 0.814233 - logloss: 0.437784
+ 20240221-024556,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_044_e79fd7ed --gpu 5,[exp_id] FinalMLP_criteo_x4_001_044_e79fd7ed,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813814 - logloss: 0.438322,[test] AUC: 0.814206 - logloss: 0.437945
+ 20240220-211154,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_004_96804bdf --gpu 3,[exp_id] FinalMLP_criteo_x4_001_004_96804bdf,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813746 - logloss: 0.438219,[test] AUC: 0.814191 - logloss: 0.437863
+ 20240221-010259,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_032_fc723936 --gpu 0,[exp_id] FinalMLP_criteo_x4_001_032_fc723936,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813754 - logloss: 0.438396,[test] AUC: 0.814119 - logloss: 0.438090
+ 20240220-231732,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_020_308b21a7 --gpu 4,[exp_id] FinalMLP_criteo_x4_001_020_308b21a7,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813601 - logloss: 0.438492,[test] AUC: 0.814078 - logloss: 0.438084
+ 20240221-005005,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_028_c5522593 --gpu 5,[exp_id] FinalMLP_criteo_x4_001_028_c5522593,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813689 - logloss: 0.438468,[test] AUC: 0.814069 - logloss: 0.438177
+ 20240221-004633,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_029_e79fd7ed --gpu 7,[exp_id] FinalMLP_criteo_x4_001_029_e79fd7ed,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813646 - logloss: 0.438504,[test] AUC: 0.814054 - logloss: 0.438179
+ 20240221-005057,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_030_cd585edd --gpu 3,[exp_id] FinalMLP_criteo_x4_001_030_cd585edd,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813625 - logloss: 0.438557,[test] AUC: 0.814015 - logloss: 0.438211
+ 20240221-002910,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_025_360b57dc --gpu 1,[exp_id] FinalMLP_criteo_x4_001_025_360b57dc,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813579 - logloss: 0.438492,[test] AUC: 0.813905 - logloss: 0.438231
+ 20240221-012533,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_033_ea8a5ea8 --gpu 2,[exp_id] FinalMLP_criteo_x4_001_033_ea8a5ea8,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813490 - logloss: 0.438769,[test] AUC: 0.813897 - logloss: 0.438406
+ 20240220-210149,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_008_ab3400bb --gpu 7,[exp_id] FinalMLP_criteo_x4_001_008_ab3400bb,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813463 - logloss: 0.438607,[test] AUC: 0.813870 - logloss: 0.438285
+ 20240220-220138,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_014_9e61c1b1 --gpu 2,[exp_id] FinalMLP_criteo_x4_001_014_9e61c1b1,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813293 - logloss: 0.438647,[test] AUC: 0.813864 - logloss: 0.438140
+ 20240221-013231,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_031_fd811285 --gpu 6,[exp_id] FinalMLP_criteo_x4_001_031_fd811285,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813451 - logloss: 0.438759,[test] AUC: 0.813857 - logloss: 0.438398
+ 20240220-212501,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_007_04be9f58 --gpu 6,[exp_id] FinalMLP_criteo_x4_001_007_04be9f58,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813360 - logloss: 0.438769,[test] AUC: 0.813795 - logloss: 0.438415
+ 20240221-003654,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_027_e79fd7ed --gpu 4,[exp_id] FinalMLP_criteo_x4_001_027_e79fd7ed,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813373 - logloss: 0.438747,[test] AUC: 0.813772 - logloss: 0.438419
+ 20240220-224145,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_016_39527c72 --gpu 6,[exp_id] FinalMLP_criteo_x4_001_016_39527c72,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813233 - logloss: 0.438731,[test] AUC: 0.813699 - logloss: 0.438343
+ 20240220-231109,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_018_c1b43f80 --gpu 2,[exp_id] FinalMLP_criteo_x4_001_018_c1b43f80,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813158 - logloss: 0.438903,[test] AUC: 0.813637 - logloss: 0.438468
+ 20240221-015543,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_039_2b191c24 --gpu 0,[exp_id] FinalMLP_criteo_x4_001_039_2b191c24,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813178 - logloss: 0.438854,[test] AUC: 0.813633 - logloss: 0.438491
+ 20240220-232724,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_023_636af301 --gpu 5,[exp_id] FinalMLP_criteo_x4_001_023_636af301,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813162 - logloss: 0.438799,[test] AUC: 0.813615 - logloss: 0.438414
+ 20240220-221813,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_015_e1ee687c --gpu 3,[exp_id] FinalMLP_criteo_x4_001_015_e1ee687c,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813165 - logloss: 0.438804,[test] AUC: 0.813614 - logloss: 0.438412
+ 20240221-014607,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_037_4202b1e0 --gpu 5,[exp_id] FinalMLP_criteo_x4_001_037_4202b1e0,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813106 - logloss: 0.438971,[test] AUC: 0.813611 - logloss: 0.438537
+ 20240220-235010,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_019_96804bdf --gpu 0,[exp_id] FinalMLP_criteo_x4_001_019_96804bdf,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812997 - logloss: 0.439069,[test] AUC: 0.813555 - logloss: 0.438547
+ 20240221-014140,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_034_bb15ef1b --gpu 1,[exp_id] FinalMLP_criteo_x4_001_034_bb15ef1b,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813113 - logloss: 0.438898,[test] AUC: 0.813551 - logloss: 0.438537
+ 20240220-231003,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_017_e1ee687c --gpu 1,[exp_id] FinalMLP_criteo_x4_001_017_e1ee687c,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813003 - logloss: 0.438898,[test] AUC: 0.813538 - logloss: 0.438419
+ 20240220-233146,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_021_cfd9d39e --gpu 7,[exp_id] FinalMLP_criteo_x4_001_021_cfd9d39e,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813123 - logloss: 0.439040,[test] AUC: 0.813525 - logloss: 0.438710
+ 20240220-221105,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_012_197f1ddc --gpu 4,[exp_id] FinalMLP_criteo_x4_001_012_197f1ddc,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812991 - logloss: 0.439034,[test] AUC: 0.813493 - logloss: 0.438604
+ 20240221-023430,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_040_e1e2498b --gpu 2,[exp_id] FinalMLP_criteo_x4_001_040_e1e2498b,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813094 - logloss: 0.439086,[test] AUC: 0.813464 - logloss: 0.438772
+ 20240221-014611,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_038_d46fcdfb --gpu 3,[exp_id] FinalMLP_criteo_x4_001_038_d46fcdfb,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812917 - logloss: 0.439200,[test] AUC: 0.813433 - logloss: 0.438728
+ 20240220-220922,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_011_65d439c0 --gpu 0,[exp_id] FinalMLP_criteo_x4_001_011_65d439c0,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813020 - logloss: 0.438944,[test] AUC: 0.813411 - logloss: 0.438599
+ 20240221-025346,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_045_cd585edd --gpu 3,[exp_id] FinalMLP_criteo_x4_001_045_cd585edd,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812945 - logloss: 0.439237,[test] AUC: 0.813367 - logloss: 0.438881
+ 20240221-015157,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_036_e4644a33 --gpu 7,[exp_id] FinalMLP_criteo_x4_001_036_e4644a33,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812910 - logloss: 0.439221,[test] AUC: 0.813359 - logloss: 0.438829
+ 20240221-024328,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_042_7a8ed7b3 --gpu 1,[exp_id] FinalMLP_criteo_x4_001_042_7a8ed7b3,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812777 - logloss: 0.439450,[test] AUC: 0.813335 - logloss: 0.438968
+ 20240221-014350,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_035_d00a908c --gpu 4,[exp_id] FinalMLP_criteo_x4_001_035_d00a908c,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812719 - logloss: 0.439376,[test] AUC: 0.813308 - logloss: 0.438868
+ 20240220-234910,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_024_f51e866e --gpu 6,[exp_id] FinalMLP_criteo_x4_001_024_f51e866e,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812701 - logloss: 0.439347,[test] AUC: 0.813303 - logloss: 0.438836
+ 20240220-233809,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_022_9f9552b6 --gpu 3,[exp_id] FinalMLP_criteo_x4_001_022_9f9552b6,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812765 - logloss: 0.439253,[test] AUC: 0.813298 - logloss: 0.438828
+ 20240221-023550,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_041_2b191c24 --gpu 6,[exp_id] FinalMLP_criteo_x4_001_041_2b191c24,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812793 - logloss: 0.439503,[test] AUC: 0.813264 - logloss: 0.439091
+ 20240220-221705,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_013_1e5426f1 --gpu 7,[exp_id] FinalMLP_criteo_x4_001_013_1e5426f1,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812776 - logloss: 0.439204,[test] AUC: 0.813239 - logloss: 0.438784
+ 20240221-031755,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_043_c5522593 --gpu 4,[exp_id] FinalMLP_criteo_x4_001_043_c5522593,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812782 - logloss: 0.439452,[test] AUC: 0.813191 - logloss: 0.439098
+ 20240221-033349,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_048_9df8ab3b --gpu 2,[exp_id] FinalMLP_criteo_x4_001_048_9df8ab3b,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812712 - logloss: 0.439527,[test] AUC: 0.813124 - logloss: 0.439165
+ 20240221-025637,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_047_63f5b715 --gpu 0,[exp_id] FinalMLP_criteo_x4_001_047_63f5b715,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812506 - logloss: 0.439672,[test] AUC: 0.813118 - logloss: 0.439164
+ 20240221-030400,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_046_09b8e092 --gpu 7,[exp_id] FinalMLP_criteo_x4_001_046_09b8e092,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812626 - logloss: 0.439481,[test] AUC: 0.813059 - logloss: 0.439132
+ 20240220-222553,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_010_37bd9769 --gpu 5,[exp_id] FinalMLP_criteo_x4_001_010_37bd9769,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812425 - logloss: 0.439628,[test] AUC: 0.812899 - logloss: 0.439220
diff --git a/ranking/ctr/FinalMLP/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03.yaml b/ranking/ctr/FinalMLP/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03.yaml
new file mode 100644
index 00000000..cb626ee2
--- /dev/null
+++ b/ranking/ctr/FinalMLP/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03.yaml
@@ -0,0 +1,48 @@
+base_config: ../model_zoo/FinalMLP/config/
+base_expid: FinalMLP_default
+dataset_id: criteo_x4_001
+
+dataset_config:
+ criteo_x4_001:
+ data_format: csv
+ data_root: ../data/Criteo/
+ feature_cols:
+ - active: true
+ dtype: float
+ fill_na: 0
+ name: [I1, I2, I3, I4, I5, I6, I7, I8, I9, I10, I11, I12, I13]
+ preprocess: convert_to_bucket
+ type: categorical
+ - active: true
+ dtype: str
+ fill_na: ''
+ name: [C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16,
+ C17, C18, C19, C20, C21, C22, C23, C24, C25, C26]
+ type: categorical
+ label_col: {dtype: float, name: Label}
+ min_categr_count: 10
+ test_data: ../data/Criteo/Criteo_x4/test.csv
+ train_data: ../data/Criteo/Criteo_x4/train.csv
+ valid_data: ../data/Criteo/Criteo_x4/valid.csv
+
+tuner_space:
+ model_root: './checkpoints/'
+ embedding_dim: 16
+ embedding_regularizer: 1.e-5
+ mlp1_batch_norm: False
+ mlp2_batch_norm: False
+ mlp1_hidden_units: [[1000, 1000, 1000, 1000, 1000], [1000, 1000, 1000]]
+ mlp2_hidden_units: [[500], [1000], [2000], [1000, 1000]]
+ mlp1_dropout: [0.2, 0.1]
+ mlp2_dropout: 0.1
+ use_fs: False
+ fs_hidden_units: [[1000]]
+ fs1_context: [[]]
+ fs2_context: [[]]
+ num_heads: [10, 50, 100]
+ learning_rate: 1.e-3
+ batch_size: 10000
+ seed: 2019
+ monitor: 'AUC'
+ monitor_mode: 'max'
+ metrics: [['AUC', 'logloss']]
diff --git a/ranking/ctr/FinalMLP/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03/dataset_config.yaml b/ranking/ctr/FinalMLP/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03/dataset_config.yaml
new file mode 100644
index 00000000..73e334c1
--- /dev/null
+++ b/ranking/ctr/FinalMLP/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03/dataset_config.yaml
@@ -0,0 +1,21 @@
+criteo_x4_001_a5e05ce7:
+ data_format: csv
+ data_root: ../data/Criteo/
+ feature_cols:
+ - active: true
+ dtype: float
+ fill_na: 0
+ name: [I1, I2, I3, I4, I5, I6, I7, I8, I9, I10, I11, I12, I13]
+ preprocess: convert_to_bucket
+ type: categorical
+ - active: true
+ dtype: str
+ fill_na: ''
+ name: [C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16,
+ C17, C18, C19, C20, C21, C22, C23, C24, C25, C26]
+ type: categorical
+ label_col: {dtype: float, name: Label}
+ min_categr_count: 10
+ test_data: ../data/Criteo/Criteo_x4/test.csv
+ train_data: ../data/Criteo/Criteo_x4/train.csv
+ valid_data: ../data/Criteo/Criteo_x4/valid.csv
diff --git a/ranking/ctr/FinalMLP/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03/model_config.yaml b/ranking/ctr/FinalMLP/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03/model_config.yaml
new file mode 100644
index 00000000..3a838127
--- /dev/null
+++ b/ranking/ctr/FinalMLP/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03/model_config.yaml
@@ -0,0 +1,2016 @@
+FinalMLP_criteo_x4_001_001_d6056bd9:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.2
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 10
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_002_53a37ddd:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.2
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 50
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_003_308b21a7:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.2
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 100
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_004_96804bdf:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.2
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [1000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 10
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_005_308b21a7:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.2
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [1000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 50
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_006_cfd9d39e:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.2
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [1000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 100
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_007_04be9f58:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.2
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [2000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 10
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_008_ab3400bb:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.2
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [2000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 50
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_009_2bb21848:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.2
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [2000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 100
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_010_37bd9769:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.2
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [1000, 1000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 10
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_011_65d439c0:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.2
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [1000, 1000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 50
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_012_197f1ddc:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.2
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [1000, 1000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 100
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_013_1e5426f1:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 10
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_014_9e61c1b1:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 50
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_015_e1ee687c:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 100
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_016_39527c72:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [1000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 10
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_017_e1ee687c:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [1000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 50
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_018_c1b43f80:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [1000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 100
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_019_96804bdf:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [2000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 10
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_020_308b21a7:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [2000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 50
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_021_cfd9d39e:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [2000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 100
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_022_9f9552b6:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [1000, 1000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 10
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_023_636af301:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [1000, 1000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 50
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_024_f51e866e:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [1000, 1000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 100
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_025_360b57dc:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.2
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 10
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_026_8bc69af6:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.2
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 50
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_027_e79fd7ed:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.2
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 100
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_028_c5522593:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.2
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [1000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 10
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_029_e79fd7ed:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.2
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [1000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 50
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_030_cd585edd:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.2
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [1000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 100
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_031_fd811285:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.2
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [2000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 10
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_032_fc723936:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.2
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [2000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 50
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_033_ea8a5ea8:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.2
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [2000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 100
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_034_bb15ef1b:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.2
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [1000, 1000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 10
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_035_d00a908c:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.2
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [1000, 1000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 50
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_036_e4644a33:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.2
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [1000, 1000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 100
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_037_4202b1e0:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 10
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_038_d46fcdfb:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 50
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_039_2b191c24:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [500]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 100
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_040_e1e2498b:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [1000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 10
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_041_2b191c24:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [1000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 50
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_042_7a8ed7b3:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [1000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 100
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_043_c5522593:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [2000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 10
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_044_e79fd7ed:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [2000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 50
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_045_cd585edd:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [2000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 100
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_046_09b8e092:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [1000, 1000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 10
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_047_63f5b715:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [1000, 1000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 50
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
+FinalMLP_criteo_x4_001_048_9df8ab3b:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ fs1_context: []
+ fs2_context: []
+ fs_hidden_units: [1000]
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ mlp1_batch_norm: false
+ mlp1_dropout: 0.1
+ mlp1_hidden_activations: relu
+ mlp1_hidden_units: [1000, 1000, 1000]
+ mlp2_batch_norm: false
+ mlp2_dropout: 0.1
+ mlp2_hidden_activations: relu
+ mlp2_hidden_units: [1000, 1000]
+ model: FinalMLP
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_heads: 100
+ num_workers: 3
+ optimizer: adam
+ pickle_feature_encoder: true
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ use_fs: false
+ verbose: 1
diff --git a/ranking/ctr/FinalMLP/FinalMLP_criteo_x4_001/README.md b/ranking/ctr/FinalMLP/FinalMLP_criteo_x4_001/README.md
new file mode 100644
index 00000000..8efb3272
--- /dev/null
+++ b/ranking/ctr/FinalMLP/FinalMLP_criteo_x4_001/README.md
@@ -0,0 +1,247 @@
+## FinalMLP_criteo_x4_001
+
+A hands-on guide to run the FinalMLP model on the Criteo_x4 dataset.
+
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
+
+
+| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
+|:-----------------------------:|:-----------:|:--------:|:--------:|:-------:|
+### Environments
++ Hardware
+
+ ```python
+ CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
+ GPU: Tesla V100 32G
+ RAM: 755G
+
+ ```
+
++ Software
+
+ ```python
+ CUDA: 10.2
+ python: 3.7.10
+ pytorch: 1.10.2+cu102
+ pandas: 1.1.5
+ numpy: 1.19.5
+ scipy: 1.5.2
+ sklearn: 0.22.1
+ pyyaml: 6.0.1
+ h5py: 2.8.0
+ tqdm: 4.64.0
+ keras_preprocessing: 1.1.2
+ fuxictr: 2.2.0
+ ```
+
+### Dataset
+Please refer to [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4) to get the dataset details.
+
+### Code
+
+We use the [FinalMLP](https://github.com/reczoo/FuxiCTR/tree/v2.2.0/model_zoo/FinalMLP) model code from [FuxiCTR-v2.2.0](https://github.com/reczoo/FuxiCTR/tree/v2.2.0) for this experiment.
+
+Running steps:
+
+1. Download [FuxiCTR-v2.2.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.2.0.zip) and install all the dependencies listed in the [environments](#environments).
+
+ ```bash
+ pip uninstall fuxictr
+ pip install fuxictr==2.2.0
+ ```
+
+2. Create a data directory and put the downloaded data files in `../data/Criteo/Criteo_x4`.
+
+3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [FinalMLP_criteo_x4_tuner_config_03](./FinalMLP_criteo_x4_tuner_config_03). Please make sure that the data paths in `dataset_config.yaml` are correctly set.
+
+4. Run the following script to start training and evaluation.
+
+ ```bash
+ cd FuxiCTR/model_zoo/FinalMLP
+ nohup python run_expid.py --config YOUR_PATH/FinalMLP/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_002_53a37ddd --gpu 0 > run.log &
+ tail -f run.log
+ ```
+
+### Results
+
+| AUC | logloss |
+|:--------------------:|:--------------------:|
+| 0.814584 | 0.437353 |
+
+
+### Logs
+```python
+2024-02-20 19:33:45,916 P3047876 INFO Params: {
+ "batch_size": "10000",
+ "data_format": "csv",
+ "data_root": "../data/Criteo/",
+ "dataset_id": "criteo_x4_001_a5e05ce7",
+ "debug_mode": "False",
+ "early_stop_patience": "2",
+ "embedding_dim": "16",
+ "embedding_regularizer": "1e-05",
+ "epochs": "100",
+ "eval_steps": "None",
+ "feature_cols": "[{'active': True, 'dtype': 'float', 'fill_na': 0, 'name': ['I1', 'I2', 'I3', 'I4', 'I5', 'I6', 'I7', 'I8', 'I9', 'I10', 'I11', 'I12', 'I13'], 'preprocess': 'convert_to_bucket', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'fill_na': '', 'name': ['C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10', 'C11', 'C12', 'C13', 'C14', 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21', 'C22', 'C23', 'C24', 'C25', 'C26'], 'type': 'categorical'}]",
+ "feature_config": "None",
+ "feature_specs": "None",
+ "fs1_context": "[]",
+ "fs2_context": "[]",
+ "fs_hidden_units": "[1000]",
+ "gpu": "1",
+ "group_id": "None",
+ "label_col": "{'dtype': 'float', 'name': 'Label'}",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "metrics": "['AUC', 'logloss']",
+ "min_categr_count": "10",
+ "mlp1_batch_norm": "False",
+ "mlp1_dropout": "0.2",
+ "mlp1_hidden_activations": "relu",
+ "mlp1_hidden_units": "[1000, 1000, 1000, 1000, 1000]",
+ "mlp2_batch_norm": "False",
+ "mlp2_dropout": "0.1",
+ "mlp2_hidden_activations": "relu",
+ "mlp2_hidden_units": "[500]",
+ "model": "FinalMLP",
+ "model_id": "FinalMLP_criteo_x4_001_002_53a37ddd",
+ "model_root": "./checkpoints/",
+ "monitor": "AUC",
+ "monitor_mode": "max",
+ "net_regularizer": "0",
+ "num_heads": "50",
+ "num_workers": "3",
+ "optimizer": "adam",
+ "pickle_feature_encoder": "True",
+ "save_best_only": "True",
+ "seed": "2019",
+ "shuffle": "True",
+ "task": "binary_classification",
+ "test_data": "../data/Criteo/Criteo_x4/test.csv",
+ "train_data": "../data/Criteo/Criteo_x4/train.csv",
+ "use_features": "None",
+ "use_fs": "False",
+ "valid_data": "../data/Criteo/Criteo_x4/valid.csv",
+ "verbose": "1"
+}
+2024-02-20 19:33:45,917 P3047876 INFO Set up feature processor...
+2024-02-20 19:33:45,917 P3047876 WARNING Skip rebuilding ../data/Criteo/criteo_x4_001_a5e05ce7/feature_map.json. Please delete it manually if rebuilding is required.
+2024-02-20 19:33:45,917 P3047876 INFO Load feature_map from json: ../data/Criteo/criteo_x4_001_a5e05ce7/feature_map.json
+2024-02-20 19:33:45,917 P3047876 INFO Set column index...
+2024-02-20 19:33:45,918 P3047876 INFO Feature specs: {
+ "C1": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 1446}",
+ "C10": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 39530}",
+ "C11": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 5131}",
+ "C12": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 156656}",
+ "C13": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 3176}",
+ "C14": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 28}",
+ "C15": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 11043}",
+ "C16": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 148913}",
+ "C17": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 12}",
+ "C18": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 4560}",
+ "C19": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 2003}",
+ "C2": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 554}",
+ "C20": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 5}",
+ "C21": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 154564}",
+ "C22": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 18}",
+ "C23": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 17}",
+ "C24": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 53031}",
+ "C25": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 82}",
+ "C26": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 40955}",
+ "C3": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 157339}",
+ "C4": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 117822}",
+ "C5": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 306}",
+ "C6": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 18}",
+ "C7": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 11882}",
+ "C8": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 630}",
+ "C9": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 5}",
+ "I1": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 44}",
+ "I10": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 6}",
+ "I11": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 27}",
+ "I12": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 37}",
+ "I13": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 72}",
+ "I2": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 99}",
+ "I3": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 122}",
+ "I4": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 41}",
+ "I5": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 220}",
+ "I6": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 112}",
+ "I7": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 80}",
+ "I8": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 69}",
+ "I9": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 92}"
+}
+2024-02-20 19:33:51,214 P3047876 INFO Total number of parameters: 19524954.
+2024-02-20 19:33:51,215 P3047876 INFO Loading datasets...
+2024-02-20 19:34:26,891 P3047876 INFO Train samples: total/36672493, blocks/1
+2024-02-20 19:34:31,381 P3047876 INFO Validation samples: total/4584062, blocks/1
+2024-02-20 19:34:31,381 P3047876 INFO Loading train and validation data done.
+2024-02-20 19:34:31,381 P3047876 INFO Start training: 3668 batches/epoch
+2024-02-20 19:34:31,381 P3047876 INFO ************ Epoch=1 start ************
+2024-02-20 19:39:47,688 P3047876 INFO Train loss: 0.459406
+2024-02-20 19:39:47,688 P3047876 INFO Evaluation @epoch 1 - batch 3668:
+2024-02-20 19:40:02,818 P3047876 INFO [Metrics] AUC: 0.805111
+2024-02-20 19:40:02,820 P3047876 INFO Save best model: monitor(max)=0.805111
+2024-02-20 19:40:02,986 P3047876 INFO ************ Epoch=1 end ************
+2024-02-20 19:45:18,862 P3047876 INFO Train loss: 0.453592
+2024-02-20 19:45:18,863 P3047876 INFO Evaluation @epoch 2 - batch 3668:
+2024-02-20 19:45:34,638 P3047876 INFO [Metrics] AUC: 0.807930
+2024-02-20 19:45:34,639 P3047876 INFO Save best model: monitor(max)=0.807930
+2024-02-20 19:45:34,827 P3047876 INFO ************ Epoch=2 end ************
+2024-02-20 19:50:50,813 P3047876 INFO Train loss: 0.451762
+2024-02-20 19:50:50,813 P3047876 INFO Evaluation @epoch 3 - batch 3668:
+2024-02-20 19:51:05,790 P3047876 INFO [Metrics] AUC: 0.809258
+2024-02-20 19:51:05,791 P3047876 INFO Save best model: monitor(max)=0.809258
+2024-02-20 19:51:05,964 P3047876 INFO ************ Epoch=3 end ************
+2024-02-20 19:56:22,317 P3047876 INFO Train loss: 0.450756
+2024-02-20 19:56:22,318 P3047876 INFO Evaluation @epoch 4 - batch 3668:
+2024-02-20 19:56:37,561 P3047876 INFO [Metrics] AUC: 0.810058
+2024-02-20 19:56:37,565 P3047876 INFO Save best model: monitor(max)=0.810058
+2024-02-20 19:56:37,724 P3047876 INFO ************ Epoch=4 end ************
+2024-02-20 20:01:54,218 P3047876 INFO Train loss: 0.450101
+2024-02-20 20:01:54,219 P3047876 INFO Evaluation @epoch 5 - batch 3668:
+2024-02-20 20:02:08,996 P3047876 INFO [Metrics] AUC: 0.810280
+2024-02-20 20:02:08,997 P3047876 INFO Save best model: monitor(max)=0.810280
+2024-02-20 20:02:09,149 P3047876 INFO ************ Epoch=5 end ************
+2024-02-20 20:07:26,052 P3047876 INFO Train loss: 0.449591
+2024-02-20 20:07:26,052 P3047876 INFO Evaluation @epoch 6 - batch 3668:
+2024-02-20 20:07:40,973 P3047876 INFO [Metrics] AUC: 0.810754
+2024-02-20 20:07:40,973 P3047876 INFO Save best model: monitor(max)=0.810754
+2024-02-20 20:07:41,129 P3047876 INFO ************ Epoch=6 end ************
+2024-02-20 20:12:57,195 P3047876 INFO Train loss: 0.449185
+2024-02-20 20:12:57,196 P3047876 INFO Evaluation @epoch 7 - batch 3668:
+2024-02-20 20:13:12,692 P3047876 INFO [Metrics] AUC: 0.810747
+2024-02-20 20:13:12,693 P3047876 INFO Monitor(max)=0.810747 STOP!
+2024-02-20 20:13:12,693 P3047876 INFO Reduce learning rate on plateau: 0.000100
+2024-02-20 20:13:12,735 P3047876 INFO ************ Epoch=7 end ************
+2024-02-20 20:18:29,150 P3047876 INFO Train loss: 0.438594
+2024-02-20 20:18:29,150 P3047876 INFO Evaluation @epoch 8 - batch 3668:
+2024-02-20 20:18:43,989 P3047876 INFO [Metrics] AUC: 0.813723
+2024-02-20 20:18:43,990 P3047876 INFO Save best model: monitor(max)=0.813723
+2024-02-20 20:18:44,143 P3047876 INFO ************ Epoch=8 end ************
+2024-02-20 20:24:00,255 P3047876 INFO Train loss: 0.434399
+2024-02-20 20:24:00,255 P3047876 INFO Evaluation @epoch 9 - batch 3668:
+2024-02-20 20:24:15,936 P3047876 INFO [Metrics] AUC: 0.814101
+2024-02-20 20:24:15,940 P3047876 INFO Save best model: monitor(max)=0.814101
+2024-02-20 20:24:16,100 P3047876 INFO ************ Epoch=9 end ************
+2024-02-20 20:29:32,241 P3047876 INFO Train loss: 0.432275
+2024-02-20 20:29:32,242 P3047876 INFO Evaluation @epoch 10 - batch 3668:
+2024-02-20 20:29:47,828 P3047876 INFO [Metrics] AUC: 0.814036
+2024-02-20 20:29:47,829 P3047876 INFO Monitor(max)=0.814036 STOP!
+2024-02-20 20:29:47,830 P3047876 INFO Reduce learning rate on plateau: 0.000010
+2024-02-20 20:29:47,882 P3047876 INFO ************ Epoch=10 end ************
+2024-02-20 20:35:04,217 P3047876 INFO Train loss: 0.428519
+2024-02-20 20:35:04,217 P3047876 INFO Evaluation @epoch 11 - batch 3668:
+2024-02-20 20:35:19,473 P3047876 INFO [Metrics] AUC: 0.813797
+2024-02-20 20:35:19,474 P3047876 INFO Monitor(max)=0.813797 STOP!
+2024-02-20 20:35:19,474 P3047876 INFO Reduce learning rate on plateau: 0.000001
+2024-02-20 20:35:19,475 P3047876 INFO ********* Epoch==11 early stop *********
+2024-02-20 20:35:19,518 P3047876 INFO Training finished.
+2024-02-20 20:35:19,518 P3047876 INFO Load best model: /cache/FuxiCTR/benchmark/checkpoints/criteo_x4_001_a5e05ce7/FinalMLP_criteo_x4_001_002_53a37ddd.model
+2024-02-20 20:35:19,600 P3047876 INFO ****** Validation evaluation ******
+2024-02-20 20:35:36,784 P3047876 INFO [Metrics] AUC: 0.814101 - logloss: 0.437752
+2024-02-20 20:35:36,900 P3047876 INFO ******** Test evaluation ********
+2024-02-20 20:35:36,901 P3047876 INFO Loading datasets...
+2024-02-20 20:35:41,978 P3047876 INFO Test samples: total/4584062, blocks/1
+2024-02-20 20:35:41,978 P3047876 INFO Loading test data done.
+2024-02-20 20:35:58,674 P3047876 INFO [Metrics] AUC: 0.814584 - logloss: 0.437353
+
+```
diff --git a/ranking/ctr/FinalMLP/FinalMLP_criteo_x4_001/environments.txt b/ranking/ctr/FinalMLP/FinalMLP_criteo_x4_001/environments.txt
new file mode 100644
index 00000000..b4567ace
--- /dev/null
+++ b/ranking/ctr/FinalMLP/FinalMLP_criteo_x4_001/environments.txt
@@ -0,0 +1,18 @@
+[Hardware]
+CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
+GPU: Tesla V100 32G
+RAM: 755G
+
+[Software]
+CUDA: 10.2
+python: 3.7.10
+pytorch: 1.10.2+cu102
+pandas: 1.1.5
+numpy: 1.19.5
+scipy: 1.5.2
+sklearn: 0.22.1
+pyyaml: 6.0.1
+h5py: 2.8.0
+tqdm: 4.64.0
+keras_preprocessing: 1.1.2
+fuxictr: 2.2.0
\ No newline at end of file
diff --git a/ranking/ctr/FinalMLP/FinalMLP_criteo_x4_001/results.csv b/ranking/ctr/FinalMLP/FinalMLP_criteo_x4_001/results.csv
new file mode 100644
index 00000000..ffc46712
--- /dev/null
+++ b/ranking/ctr/FinalMLP/FinalMLP_criteo_x4_001/results.csv
@@ -0,0 +1 @@
+ 20240220-203558,[command] python run_expid.py --config Criteo_x4/FinalMLP_criteo_x4_001/FinalMLP_criteo_x4_tuner_config_03 --expid FinalMLP_criteo_x4_001_002_53a37ddd --gpu 1,[exp_id] FinalMLP_criteo_x4_001_002_53a37ddd,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814101 - logloss: 0.437752,[test] AUC: 0.814584 - logloss: 0.437353
diff --git a/ranking/ctr/FinalMLP/FinalMLP_frappe_x1/README.md b/ranking/ctr/FinalMLP/FinalMLP_frappe_x1/README.md
index c6e50264..a1332b22 100644
--- a/ranking/ctr/FinalMLP/FinalMLP_frappe_x1/README.md
+++ b/ranking/ctr/FinalMLP/FinalMLP_frappe_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FinalMLP model on the Frappe_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -38,11 +38,11 @@ Please refer to the BARS dataset [Frappe_x1](https://github.com/openbenchmark/BA
### Code
-We use the [FinalMLP](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/FinalMLP) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [FinalMLP](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/FinalMLP) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/FinalMLP/FinalMLP_movielenslatest_x1/README.md b/ranking/ctr/FinalMLP/FinalMLP_movielenslatest_x1/README.md
index 27b6ec6c..a7931152 100644
--- a/ranking/ctr/FinalMLP/FinalMLP_movielenslatest_x1/README.md
+++ b/ranking/ctr/FinalMLP/FinalMLP_movielenslatest_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FinalMLP model on the MovielensLatest_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -38,11 +38,11 @@ Please refer to the BARS dataset [MovielensLatest_x1](https://github.com/openben
### Code
-We use the [FinalMLP](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/FinalMLP) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [FinalMLP](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/FinalMLP) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/FinalNet/FinalNet_avazu_x1/FinalNet_1B_avazu_x1/README.md b/ranking/ctr/FinalNet/FinalNet_avazu_x1/FinalNet_1B_avazu_x1/README.md
index 37440d1e..b652df02 100644
--- a/ranking/ctr/FinalNet/FinalNet_avazu_x1/FinalNet_1B_avazu_x1/README.md
+++ b/ranking/ctr/FinalNet/FinalNet_avazu_x1/FinalNet_1B_avazu_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FINAL model on the Avazu_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -39,11 +39,11 @@ Please refer to the BARS dataset [Avazu_x1](https://github.com/openbenchmark/BAR
### Code
-We use the [FINAL](https://github.com/xue-pai/FuxiCTR/blob/v2.0.2/model_zoo/FINAL) model code from [FuxiCTR-v2.0.2](https://github.com/xue-pai/FuxiCTR/tree/v2.0.2) for this experiment.
+We use the [FINAL](https://github.com/reczoo/FuxiCTR/blob/v2.0.2/model_zoo/FINAL) model code from [FuxiCTR-v2.0.2](https://github.com/reczoo/FuxiCTR/tree/v2.0.2) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.2.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.2.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/FinalNet/FinalNet_avazu_x1/FinalNet_2B_avazu_x1/README.md b/ranking/ctr/FinalNet/FinalNet_avazu_x1/FinalNet_2B_avazu_x1/README.md
index b6620ba7..65aded0a 100644
--- a/ranking/ctr/FinalNet/FinalNet_avazu_x1/FinalNet_2B_avazu_x1/README.md
+++ b/ranking/ctr/FinalNet/FinalNet_avazu_x1/FinalNet_2B_avazu_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FINAL model on the Avazu_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -39,11 +39,11 @@ Please refer to the BARS dataset [Avazu_x1](https://github.com/openbenchmark/BAR
### Code
-We use the [FINAL](https://github.com/xue-pai/FuxiCTR/blob/v2.0.2/model_zoo/FINAL) model code from [FuxiCTR-v2.0.2](https://github.com/xue-pai/FuxiCTR/tree/v2.0.2) for this experiment.
+We use the [FINAL](https://github.com/reczoo/FuxiCTR/blob/v2.0.2/model_zoo/FINAL) model code from [FuxiCTR-v2.0.2](https://github.com/reczoo/FuxiCTR/tree/v2.0.2) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.2.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.2.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/FinalNet/FinalNet_avazu_x4_001/FinalNet_avazu_x4_001_015_4b405413.log b/ranking/ctr/FinalNet/FinalNet_avazu_x4_001/FinalNet_avazu_x4_001_015_4b405413.log
new file mode 100644
index 00000000..569fdba9
--- /dev/null
+++ b/ranking/ctr/FinalNet/FinalNet_avazu_x4_001/FinalNet_avazu_x4_001_015_4b405413.log
@@ -0,0 +1,106 @@
+2024-02-22 13:22:42,272 P1566359 INFO Params: {
+ "batch_norm": "True",
+ "batch_size": "8192",
+ "block1_dropout": "0",
+ "block1_hidden_activations": "ReLU",
+ "block1_hidden_units": "[2000, 2000, 2000]",
+ "block2_dropout": "0.1",
+ "block2_hidden_activations": "ReLU",
+ "block2_hidden_units": "[500]",
+ "block_type": "2B",
+ "data_format": "csv",
+ "data_root": "../data/Avazu/",
+ "dataset_id": "avazu_x4_001_a31210da",
+ "debug_mode": "False",
+ "early_stop_patience": "1",
+ "embedding_dim": "16",
+ "embedding_regularizer": "0",
+ "epochs": "100",
+ "eval_steps": "None",
+ "feature_cols": "[{'active': False, 'dtype': 'str', 'name': 'id', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': 'hour', 'preprocess': 'convert_hour', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': ['C1', 'banner_pos', 'site_id', 'site_domain', 'site_category', 'app_id', 'app_domain', 'app_category', 'device_id', 'device_ip', 'device_model', 'device_type', 'device_conn_type', 'C14', 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21'], 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': 'weekday', 'preprocess': 'convert_weekday', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': 'weekend', 'preprocess': 'convert_weekend', 'type': 'categorical'}]",
+ "feature_specs": "None",
+ "gpu": "6",
+ "group_id": "None",
+ "label_col": "{'dtype': 'float', 'name': 'click'}",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "metrics": "['AUC', 'logloss']",
+ "min_categr_count": "2",
+ "model": "FinalNet",
+ "model_id": "FinalNet_avazu_x4_001_015_4b405413",
+ "model_root": "./checkpoints/",
+ "monitor": "{'AUC': 1, 'logloss': -1}",
+ "monitor_mode": "max",
+ "net_regularizer": "0",
+ "num_workers": "3",
+ "optimizer": "adam",
+ "ordered_features": "None",
+ "pickle_feature_encoder": "True",
+ "residual_type": "concat",
+ "save_best_only": "True",
+ "seed": "2019",
+ "shuffle": "True",
+ "task": "binary_classification",
+ "test_data": "../data/Avazu/Avazu_x4/test.csv",
+ "train_data": "../data/Avazu/Avazu_x4/train.csv",
+ "use_feature_gating": "True",
+ "valid_data": "../data/Avazu/Avazu_x4/valid.csv",
+ "verbose": "1"
+}
+2024-02-22 13:22:42,273 P1566359 INFO Set up feature processor...
+2024-02-22 13:22:42,273 P1566359 WARNING Skip rebuilding ../data/Avazu/avazu_x4_001_a31210da/feature_map.json. Please delete it manually if rebuilding is required.
+2024-02-22 13:22:42,273 P1566359 INFO Load feature_map from json: ../data/Avazu/avazu_x4_001_a31210da/feature_map.json
+2024-02-22 13:22:42,273 P1566359 INFO Set column index...
+2024-02-22 13:22:42,274 P1566359 INFO Feature specs: {
+ "C1": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 9}",
+ "C14": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 2556}",
+ "C15": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 10}",
+ "C16": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 11}",
+ "C17": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 434}",
+ "C18": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 6}",
+ "C19": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 70}",
+ "C20": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 173}",
+ "C21": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 62}",
+ "app_category": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 33}",
+ "app_domain": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 400}",
+ "app_id": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 6545}",
+ "banner_pos": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 9}",
+ "device_conn_type": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 6}",
+ "device_id": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 820509}",
+ "device_ip": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 2903322}",
+ "device_model": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 7259}",
+ "device_type": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 7}",
+ "hour": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 26}",
+ "site_category": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 27}",
+ "site_domain": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 5461}",
+ "site_id": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 4051}",
+ "weekday": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 9}",
+ "weekend": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 4}"
+}
+2024-02-22 13:22:46,770 P1566359 INFO Total number of parameters: 69766586.
+2024-02-22 13:22:46,770 P1566359 INFO Loading datasets...
+2024-02-22 13:23:07,420 P1566359 INFO Train samples: total/32343172, blocks/1
+2024-02-22 13:23:09,945 P1566359 INFO Validation samples: total/4042897, blocks/1
+2024-02-22 13:23:09,946 P1566359 INFO Loading train and validation data done.
+2024-02-22 13:23:09,946 P1566359 INFO Start training: 3949 batches/epoch
+2024-02-22 13:23:09,946 P1566359 INFO ************ Epoch=1 start ************
+2024-02-22 13:28:17,207 P1566359 INFO Train loss: 0.379626
+2024-02-22 13:28:17,207 P1566359 INFO Evaluation @epoch 1 - batch 3949:
+2024-02-22 13:28:33,023 P1566359 INFO [Metrics] AUC: 0.793961 - logloss: 0.371360
+2024-02-22 13:28:33,026 P1566359 INFO Save best model: monitor(max)=0.422601
+2024-02-22 13:28:33,641 P1566359 INFO ************ Epoch=1 end ************
+2024-02-22 13:33:41,213 P1566359 INFO Train loss: 0.330151
+2024-02-22 13:33:41,213 P1566359 INFO Evaluation @epoch 2 - batch 3949:
+2024-02-22 13:33:57,482 P1566359 INFO [Metrics] AUC: 0.789077 - logloss: 0.381398
+2024-02-22 13:33:57,485 P1566359 INFO Monitor(max)=0.407679 STOP!
+2024-02-22 13:33:57,485 P1566359 INFO Reduce learning rate on plateau: 0.000100
+2024-02-22 13:33:57,485 P1566359 INFO ********* Epoch==2 early stop *********
+2024-02-22 13:33:57,530 P1566359 INFO Training finished.
+2024-02-22 13:33:57,531 P1566359 INFO Load best model: /cache/FuxiCTR/benchmark/checkpoints/avazu_x4_001_a31210da/FinalNet_avazu_x4_001_015_4b405413.model
+2024-02-22 13:33:57,720 P1566359 INFO ****** Validation evaluation ******
+2024-02-22 13:34:13,540 P1566359 INFO [Metrics] AUC: 0.793961 - logloss: 0.371360
+2024-02-22 13:34:13,621 P1566359 INFO ******** Test evaluation ********
+2024-02-22 13:34:13,621 P1566359 INFO Loading datasets...
+2024-02-22 13:34:16,143 P1566359 INFO Test samples: total/4042898, blocks/1
+2024-02-22 13:34:16,143 P1566359 INFO Loading test data done.
+2024-02-22 13:34:31,959 P1566359 INFO [Metrics] AUC: 0.794116 - logloss: 0.371254
diff --git a/ranking/ctr/FinalNet/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04.csv b/ranking/ctr/FinalNet/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04.csv
new file mode 100644
index 00000000..f0cc108c
--- /dev/null
+++ b/ranking/ctr/FinalNet/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04.csv
@@ -0,0 +1,40 @@
+ 20240222-133431,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_015_4b405413 --gpu 6,[exp_id] FinalNet_avazu_x4_001_015_4b405413,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793961 - logloss: 0.371360,[test] AUC: 0.794116 - logloss: 0.371254
+ 20240222-133435,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_016_7871464d --gpu 7,[exp_id] FinalNet_avazu_x4_001_016_7871464d,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793869 - logloss: 0.371438,[test] AUC: 0.793897 - logloss: 0.371396
+ 20240222-133411,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_013_65c04d7c --gpu 5,[exp_id] FinalNet_avazu_x4_001_013_65c04d7c,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793772 - logloss: 0.371926,[test] AUC: 0.793883 - logloss: 0.371859
+ 20240222-132222,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_006_4b405413 --gpu 5,[exp_id] FinalNet_avazu_x4_001_006_4b405413,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793840 - logloss: 0.371475,[test] AUC: 0.793844 - logloss: 0.371473
+ 20240222-132221,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_003_78a15262 --gpu 2,[exp_id] FinalNet_avazu_x4_001_003_78a15262,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793690 - logloss: 0.371642,[test] AUC: 0.793827 - logloss: 0.371566
+ 20240222-133413,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_014_b1cae98e --gpu 4,[exp_id] FinalNet_avazu_x4_001_014_b1cae98e,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793771 - logloss: 0.371959,[test] AUC: 0.793808 - logloss: 0.371897
+ 20240222-132225,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_005_b4887ee5 --gpu 4,[exp_id] FinalNet_avazu_x4_001_005_b4887ee5,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793607 - logloss: 0.371659,[test] AUC: 0.793655 - logloss: 0.371628
+ 20240222-132218,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_004_65c04d7c --gpu 3,[exp_id] FinalNet_avazu_x4_001_004_65c04d7c,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793592 - logloss: 0.371777,[test] AUC: 0.793654 - logloss: 0.371748
+ 20240222-135747,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_033_7161b11e --gpu 4,[exp_id] FinalNet_avazu_x4_001_033_7161b11e,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793398 - logloss: 0.371869,[test] AUC: 0.793558 - logloss: 0.371786
+ 20240222-135757,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_034_8d53f431 --gpu 0,[exp_id] FinalNet_avazu_x4_001_034_8d53f431,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793382 - logloss: 0.371762,[test] AUC: 0.793506 - logloss: 0.371691
+ 20240222-133400,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_012_9f5e50ba --gpu 3,[exp_id] FinalNet_avazu_x4_001_012_9f5e50ba,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793248 - logloss: 0.372908,[test] AUC: 0.793483 - logloss: 0.372788
+ 20240222-133415,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_011_8c1e4350 --gpu 2,[exp_id] FinalNet_avazu_x4_001_011_8c1e4350,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793247 - logloss: 0.372530,[test] AUC: 0.793466 - logloss: 0.372407
+ 20240222-134958,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_025_0ed13236 --gpu 4,[exp_id] FinalNet_avazu_x4_001_025_0ed13236,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793311 - logloss: 0.371928,[test] AUC: 0.793463 - logloss: 0.371837
+ 20240222-135828,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_035_fef319c9 --gpu 7,[exp_id] FinalNet_avazu_x4_001_035_fef319c9,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793276 - logloss: 0.372033,[test] AUC: 0.793462 - logloss: 0.371922
+ 20240222-132237,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_007_861e52a0 --gpu 6,[exp_id] FinalNet_avazu_x4_001_007_861e52a0,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793200 - logloss: 0.371938,[test] AUC: 0.793433 - logloss: 0.371800
+ 20240222-132219,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_001_021b5f8d --gpu 0,[exp_id] FinalNet_avazu_x4_001_001_021b5f8d,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793119 - logloss: 0.372740,[test] AUC: 0.793389 - logloss: 0.372579
+ 20240222-135834,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_036_7917160e --gpu 6,[exp_id] FinalNet_avazu_x4_001_036_7917160e,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793262 - logloss: 0.372004,[test] AUC: 0.793371 - logloss: 0.371920
+ 20240222-135400,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_031_9d00c094 --gpu 3,[exp_id] FinalNet_avazu_x4_001_031_9d00c094,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793029 - logloss: 0.372028,[test] AUC: 0.793289 - logloss: 0.371856
+ 20240222-132208,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_002_8c1e4350 --gpu 1,[exp_id] FinalNet_avazu_x4_001_002_8c1e4350,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792994 - logloss: 0.372372,[test] AUC: 0.793279 - logloss: 0.372219
+ 20240222-134610,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_019_cada1100 --gpu 5,[exp_id] FinalNet_avazu_x4_001_019_cada1100,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793108 - logloss: 0.371962,[test] AUC: 0.793257 - logloss: 0.371852
+ 20240222-140201,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_039_507a70cf --gpu 2,[exp_id] FinalNet_avazu_x4_001_039_507a70cf,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792945 - logloss: 0.372026,[test] AUC: 0.793244 - logloss: 0.371880
+ 20240222-135405,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_032_e02b1105 --gpu 1,[exp_id] FinalNet_avazu_x4_001_032_e02b1105,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792967 - logloss: 0.372048,[test] AUC: 0.793235 - logloss: 0.371885
+ 20240222-134220,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_024_7161b11e --gpu 7,[exp_id] FinalNet_avazu_x4_001_024_7161b11e,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793057 - logloss: 0.372041,[test] AUC: 0.793191 - logloss: 0.371942
+ 20240222-134622,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_018_b2ebd161 --gpu 1,[exp_id] FinalNet_avazu_x4_001_018_b2ebd161,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792981 - logloss: 0.372094,[test] AUC: 0.793175 - logloss: 0.371992
+ 20240222-140209,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_038_22192711 --gpu 1,[exp_id] FinalNet_avazu_x4_001_038_22192711,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792939 - logloss: 0.372050,[test] AUC: 0.793145 - logloss: 0.371957
+ 20240222-134223,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_023_867276e9 --gpu 6,[exp_id] FinalNet_avazu_x4_001_023_867276e9,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792829 - logloss: 0.372192,[test] AUC: 0.793079 - logloss: 0.372063
+ 20240222-140206,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_037_b193daff --gpu 3,[exp_id] FinalNet_avazu_x4_001_037_b193daff,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792869 - logloss: 0.372112,[test] AUC: 0.793071 - logloss: 0.372015
+ 20240222-135012,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_026_fef319c9 --gpu 0,[exp_id] FinalNet_avazu_x4_001_026_fef319c9,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792929 - logloss: 0.372044,[test] AUC: 0.793043 - logloss: 0.371983
+ 20240222-134205,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_021_a8abb409 --gpu 4,[exp_id] FinalNet_avazu_x4_001_021_a8abb409,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792773 - logloss: 0.372057,[test] AUC: 0.792989 - logloss: 0.371921
+ 20240222-132237,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_008_d7ebfd2d --gpu 7,[exp_id] FinalNet_avazu_x4_001_008_d7ebfd2d,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792865 - logloss: 0.372159,[test] AUC: 0.792955 - logloss: 0.372100
+ 20240222-133410,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_009_9b4174e4 --gpu 1,[exp_id] FinalNet_avazu_x4_001_009_9b4174e4,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792939 - logloss: 0.372091,[test] AUC: 0.792949 - logloss: 0.372042
+ 20240222-134218,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_022_9d00c094 --gpu 0,[exp_id] FinalNet_avazu_x4_001_022_9d00c094,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792851 - logloss: 0.372013,[test] AUC: 0.792935 - logloss: 0.371965
+ 20240222-134615,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_020_ac3a250d --gpu 2,[exp_id] FinalNet_avazu_x4_001_020_ac3a250d,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792773 - logloss: 0.372232,[test] AUC: 0.792836 - logloss: 0.372180
+ 20240222-135037,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_027_19b02978 --gpu 7,[exp_id] FinalNet_avazu_x4_001_027_19b02978,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792744 - logloss: 0.372224,[test] AUC: 0.792830 - logloss: 0.372169
+ 20240222-134615,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_017_d7ebfd2d --gpu 3,[exp_id] FinalNet_avazu_x4_001_017_d7ebfd2d,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792652 - logloss: 0.372724,[test] AUC: 0.792801 - logloss: 0.372621
+ 20240222-135039,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_028_b193daff --gpu 6,[exp_id] FinalNet_avazu_x4_001_028_b193daff,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792331 - logloss: 0.372400,[test] AUC: 0.792751 - logloss: 0.372167
+ 20240222-140159,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_040_fad2874f --gpu 5,[exp_id] FinalNet_avazu_x4_001_040_fad2874f,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792593 - logloss: 0.372381,[test] AUC: 0.792723 - logloss: 0.372283
+ 20240222-133431,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_010_cada1100 --gpu 0,[exp_id] FinalNet_avazu_x4_001_010_cada1100,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792426 - logloss: 0.372601,[test] AUC: 0.792572 - logloss: 0.372488
+ 20240222-135408,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_029_05eab58b --gpu 5,[exp_id] FinalNet_avazu_x4_001_029_05eab58b,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792367 - logloss: 0.372385,[test] AUC: 0.792539 - logloss: 0.372294
+ 20240222-135411,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_030_507a70cf --gpu 2,[exp_id] FinalNet_avazu_x4_001_030_507a70cf,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.792141 - logloss: 0.372523,[test] AUC: 0.792138 - logloss: 0.372536
diff --git a/ranking/ctr/FinalNet/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04.yaml b/ranking/ctr/FinalNet/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04.yaml
new file mode 100644
index 00000000..71d66b6e
--- /dev/null
+++ b/ranking/ctr/FinalNet/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04.yaml
@@ -0,0 +1,42 @@
+base_config: ../model_zoo/FinalNet/config/
+base_expid: FinalNet_default
+dataset_id: avazu_x4_001
+
+dataset_config:
+ avazu_x4_001:
+ data_root: ../data/Avazu/
+ data_format: csv
+ train_data: ../data/Avazu/Avazu_x4/train.csv
+ valid_data: ../data/Avazu/Avazu_x4/valid.csv
+ test_data: ../data/Avazu/Avazu_x4/test.csv
+ min_categr_count: 2
+ feature_cols:
+ - {name: id, active: False, dtype: str, type: categorical}
+ - {name: hour, active: True, dtype: str, type: categorical, preprocess: convert_hour}
+ - {name: [C1,banner_pos,site_id,site_domain,site_category,app_id,app_domain,app_category,device_id,
+ device_ip,device_model,device_type,device_conn_type,C14,C15,C16,C17,C18,C19,C20,C21],
+ active: True, dtype: str, type: categorical}
+ - {name: weekday, active: True, dtype: str, type: categorical, preprocess: convert_weekday}
+ - {name: weekend, active: True, dtype: str, type: categorical, preprocess: convert_weekend}
+ label_col: {name: click, dtype: float}
+
+
+tuner_space:
+ model_root: './checkpoints/'
+ embedding_dim: 16
+ embedding_regularizer: 0
+ block_type: "2B"
+ batch_norm: True
+ use_feature_gating: True
+ block1_hidden_units: [[2000, 2000, 2000], [1000, 1000, 1000]]
+ block1_hidden_activations: ReLU
+ block1_dropout: [0.1, 0]
+ block2_hidden_units: [[100], [200], [500], [500, 500], [200, 200]]
+ block2_hidden_activations: ReLU
+ block2_dropout: [0.1, 0]
+ learning_rate: 1.e-3
+ batch_size: 8192
+ seed: 2019
+ monitor: {'AUC': 1, 'logloss': -1}
+ metrics: [['AUC', 'logloss']]
+ early_stop_patience: 1
diff --git a/ranking/ctr/FinalNet/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04/dataset_config.yaml b/ranking/ctr/FinalNet/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04/dataset_config.yaml
new file mode 100644
index 00000000..0de8b62a
--- /dev/null
+++ b/ranking/ctr/FinalNet/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04/dataset_config.yaml
@@ -0,0 +1,19 @@
+avazu_x4_001_a31210da:
+ data_format: csv
+ data_root: ../data/Avazu/
+ feature_cols:
+ - {active: false, dtype: str, name: id, type: categorical}
+ - {active: true, dtype: str, name: hour, preprocess: convert_hour, type: categorical}
+ - active: true
+ dtype: str
+ name: [C1, banner_pos, site_id, site_domain, site_category, app_id, app_domain,
+ app_category, device_id, device_ip, device_model, device_type, device_conn_type,
+ C14, C15, C16, C17, C18, C19, C20, C21]
+ type: categorical
+ - {active: true, dtype: str, name: weekday, preprocess: convert_weekday, type: categorical}
+ - {active: true, dtype: str, name: weekend, preprocess: convert_weekend, type: categorical}
+ label_col: {dtype: float, name: click}
+ min_categr_count: 2
+ test_data: ../data/Avazu/Avazu_x4/test.csv
+ train_data: ../data/Avazu/Avazu_x4/train.csv
+ valid_data: ../data/Avazu/Avazu_x4/valid.csv
diff --git a/ranking/ctr/FinalNet/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04/model_config.yaml b/ranking/ctr/FinalNet/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04/model_config.yaml
new file mode 100644
index 00000000..8a53e865
--- /dev/null
+++ b/ranking/ctr/FinalNet/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04/model_config.yaml
@@ -0,0 +1,1520 @@
+FinalNet_avazu_x4_001_001_021b5f8d:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.1
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [2000, 2000, 2000]
+ block2_dropout: 0.1
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [100]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_002_8c1e4350:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.1
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [2000, 2000, 2000]
+ block2_dropout: 0
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [100]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_003_78a15262:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.1
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [2000, 2000, 2000]
+ block2_dropout: 0.1
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [200]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_004_65c04d7c:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.1
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [2000, 2000, 2000]
+ block2_dropout: 0
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [200]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_005_b4887ee5:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.1
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [2000, 2000, 2000]
+ block2_dropout: 0.1
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [500]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_006_4b405413:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.1
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [2000, 2000, 2000]
+ block2_dropout: 0
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [500]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_007_861e52a0:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.1
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [2000, 2000, 2000]
+ block2_dropout: 0.1
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [500, 500]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_008_d7ebfd2d:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.1
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [2000, 2000, 2000]
+ block2_dropout: 0
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [500, 500]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_009_9b4174e4:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.1
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [2000, 2000, 2000]
+ block2_dropout: 0.1
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [200, 200]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_010_cada1100:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.1
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [2000, 2000, 2000]
+ block2_dropout: 0
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [200, 200]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_011_8c1e4350:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [2000, 2000, 2000]
+ block2_dropout: 0.1
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [100]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_012_9f5e50ba:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [2000, 2000, 2000]
+ block2_dropout: 0
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [100]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_013_65c04d7c:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [2000, 2000, 2000]
+ block2_dropout: 0.1
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [200]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_014_b1cae98e:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [2000, 2000, 2000]
+ block2_dropout: 0
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [200]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_015_4b405413:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [2000, 2000, 2000]
+ block2_dropout: 0.1
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [500]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_016_7871464d:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [2000, 2000, 2000]
+ block2_dropout: 0
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [500]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_017_d7ebfd2d:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [2000, 2000, 2000]
+ block2_dropout: 0.1
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [500, 500]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_018_b2ebd161:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [2000, 2000, 2000]
+ block2_dropout: 0
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [500, 500]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_019_cada1100:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [2000, 2000, 2000]
+ block2_dropout: 0.1
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [200, 200]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_020_ac3a250d:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [2000, 2000, 2000]
+ block2_dropout: 0
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [200, 200]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_021_a8abb409:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.1
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.1
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [100]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_022_9d00c094:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.1
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [100]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_023_867276e9:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.1
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.1
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [200]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_024_7161b11e:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.1
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [200]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_025_0ed13236:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.1
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.1
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [500]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_026_fef319c9:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.1
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [500]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_027_19b02978:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.1
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.1
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [500, 500]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_028_b193daff:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.1
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [500, 500]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_029_05eab58b:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.1
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.1
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [200, 200]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_030_507a70cf:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.1
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [200, 200]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_031_9d00c094:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.1
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [100]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_032_e02b1105:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [100]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_033_7161b11e:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.1
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [200]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_034_8d53f431:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [200]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_035_fef319c9:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.1
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [500]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_036_7917160e:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [500]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_037_b193daff:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.1
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [500, 500]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_038_22192711:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [500, 500]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_039_507a70cf:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.1
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [200, 200]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_avazu_x4_001_040_fad2874f:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [200, 200]
+ block_type: 2B
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ early_stop_patience: 1
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
diff --git a/ranking/ctr/FinalNet/FinalNet_avazu_x4_001/README.md b/ranking/ctr/FinalNet/FinalNet_avazu_x4_001/README.md
new file mode 100644
index 00000000..24d97f93
--- /dev/null
+++ b/ranking/ctr/FinalNet/FinalNet_avazu_x4_001/README.md
@@ -0,0 +1,182 @@
+## FinalNet_avazu_x4_001
+
+A hands-on guide to run the FinalNet model on the Avazu_x4 dataset.
+
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
+
+
+| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
+|:-----------------------------:|:-----------:|:--------:|:--------:|-------|
+### Environments
++ Hardware
+
+ ```python
+ CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
+ GPU: Tesla V100 32G
+ RAM: 755G
+
+ ```
+
++ Software
+
+ ```python
+ CUDA: 10.2
+ python: 3.7.10
+ pytorch: 1.10.2+cu102
+ pandas: 1.1.5
+ numpy: 1.19.5
+ scipy: 1.5.2
+ sklearn: 0.22.1
+ pyyaml: 6.0.1
+ h5py: 2.8.0
+ tqdm: 4.64.0
+ keras_preprocessing: 1.1.2
+ fuxictr: 2.2.0
+
+ ```
+
+### Dataset
+Please refer to [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4) to get the dataset details.
+
+### Code
+
+We use the [FinalNet](https://github.com/reczoo/FuxiCTR/tree/v2.2.0/model_zoo/FinalNet) model code from [FuxiCTR-v2.2.0](https://github.com/reczoo/FuxiCTR/tree/v2.2.0) for this experiment.
+
+Running steps:
+
+1. Download [FuxiCTR-v2.2.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.2.0.zip) and install all the dependencies listed in the [environments](#environments).
+
+ ```bash
+ pip uninstall fuxictr
+ pip install fuxictr==2.2.0
+ ```
+
+2. Create a data directory and put the downloaded data files in `../data/Avazu/Avazu_x4`.
+
+3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [FinalNet_avazu_x4_tuner_config_04](./FinalNet_avazu_x4_tuner_config_04). Please make sure that the data paths in `dataset_config.yaml` are correctly set.
+
+4. Run the following script to start training and evaluation.
+
+ ```bash
+ cd FuxiCTR/model_zoo/FinalNet
+ nohup python run_expid.py --config YOUR_PATH/FinalNet/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_015_4b405413 --gpu 0 > run.log &
+ tail -f run.log
+ ```
+
+### Results
+
+| AUC | logloss |
+|:--------------------:|:--------------------:|
+| 0.794116 | 0.371254 |
+
+
+### Logs
+```python
+2024-02-22 13:22:42,272 P1566359 INFO Params: {
+ "batch_norm": "True",
+ "batch_size": "8192",
+ "block1_dropout": "0",
+ "block1_hidden_activations": "ReLU",
+ "block1_hidden_units": "[2000, 2000, 2000]",
+ "block2_dropout": "0.1",
+ "block2_hidden_activations": "ReLU",
+ "block2_hidden_units": "[500]",
+ "block_type": "2B",
+ "data_format": "csv",
+ "data_root": "../data/Avazu/",
+ "dataset_id": "avazu_x4_001_a31210da",
+ "debug_mode": "False",
+ "early_stop_patience": "1",
+ "embedding_dim": "16",
+ "embedding_regularizer": "0",
+ "epochs": "100",
+ "eval_steps": "None",
+ "feature_cols": "[{'active': False, 'dtype': 'str', 'name': 'id', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': 'hour', 'preprocess': 'convert_hour', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': ['C1', 'banner_pos', 'site_id', 'site_domain', 'site_category', 'app_id', 'app_domain', 'app_category', 'device_id', 'device_ip', 'device_model', 'device_type', 'device_conn_type', 'C14', 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21'], 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': 'weekday', 'preprocess': 'convert_weekday', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': 'weekend', 'preprocess': 'convert_weekend', 'type': 'categorical'}]",
+ "feature_specs": "None",
+ "gpu": "6",
+ "group_id": "None",
+ "label_col": "{'dtype': 'float', 'name': 'click'}",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "metrics": "['AUC', 'logloss']",
+ "min_categr_count": "2",
+ "model": "FinalNet",
+ "model_id": "FinalNet_avazu_x4_001_015_4b405413",
+ "model_root": "./checkpoints/",
+ "monitor": "{'AUC': 1, 'logloss': -1}",
+ "monitor_mode": "max",
+ "net_regularizer": "0",
+ "num_workers": "3",
+ "optimizer": "adam",
+ "ordered_features": "None",
+ "pickle_feature_encoder": "True",
+ "residual_type": "concat",
+ "save_best_only": "True",
+ "seed": "2019",
+ "shuffle": "True",
+ "task": "binary_classification",
+ "test_data": "../data/Avazu/Avazu_x4/test.csv",
+ "train_data": "../data/Avazu/Avazu_x4/train.csv",
+ "use_feature_gating": "True",
+ "valid_data": "../data/Avazu/Avazu_x4/valid.csv",
+ "verbose": "1"
+}
+2024-02-22 13:22:42,273 P1566359 INFO Set up feature processor...
+2024-02-22 13:22:42,273 P1566359 WARNING Skip rebuilding ../data/Avazu/avazu_x4_001_a31210da/feature_map.json. Please delete it manually if rebuilding is required.
+2024-02-22 13:22:42,273 P1566359 INFO Load feature_map from json: ../data/Avazu/avazu_x4_001_a31210da/feature_map.json
+2024-02-22 13:22:42,273 P1566359 INFO Set column index...
+2024-02-22 13:22:42,274 P1566359 INFO Feature specs: {
+ "C1": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 9}",
+ "C14": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 2556}",
+ "C15": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 10}",
+ "C16": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 11}",
+ "C17": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 434}",
+ "C18": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 6}",
+ "C19": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 70}",
+ "C20": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 173}",
+ "C21": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 62}",
+ "app_category": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 33}",
+ "app_domain": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 400}",
+ "app_id": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 6545}",
+ "banner_pos": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 9}",
+ "device_conn_type": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 6}",
+ "device_id": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 820509}",
+ "device_ip": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 2903322}",
+ "device_model": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 7259}",
+ "device_type": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 7}",
+ "hour": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 26}",
+ "site_category": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 27}",
+ "site_domain": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 5461}",
+ "site_id": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 4051}",
+ "weekday": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 9}",
+ "weekend": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 4}"
+}
+2024-02-22 13:22:46,770 P1566359 INFO Total number of parameters: 69766586.
+2024-02-22 13:22:46,770 P1566359 INFO Loading datasets...
+2024-02-22 13:23:07,420 P1566359 INFO Train samples: total/32343172, blocks/1
+2024-02-22 13:23:09,945 P1566359 INFO Validation samples: total/4042897, blocks/1
+2024-02-22 13:23:09,946 P1566359 INFO Loading train and validation data done.
+2024-02-22 13:23:09,946 P1566359 INFO Start training: 3949 batches/epoch
+2024-02-22 13:23:09,946 P1566359 INFO ************ Epoch=1 start ************
+2024-02-22 13:28:17,207 P1566359 INFO Train loss: 0.379626
+2024-02-22 13:28:17,207 P1566359 INFO Evaluation @epoch 1 - batch 3949:
+2024-02-22 13:28:33,023 P1566359 INFO [Metrics] AUC: 0.793961 - logloss: 0.371360
+2024-02-22 13:28:33,026 P1566359 INFO Save best model: monitor(max)=0.422601
+2024-02-22 13:28:33,641 P1566359 INFO ************ Epoch=1 end ************
+2024-02-22 13:33:41,213 P1566359 INFO Train loss: 0.330151
+2024-02-22 13:33:41,213 P1566359 INFO Evaluation @epoch 2 - batch 3949:
+2024-02-22 13:33:57,482 P1566359 INFO [Metrics] AUC: 0.789077 - logloss: 0.381398
+2024-02-22 13:33:57,485 P1566359 INFO Monitor(max)=0.407679 STOP!
+2024-02-22 13:33:57,485 P1566359 INFO Reduce learning rate on plateau: 0.000100
+2024-02-22 13:33:57,485 P1566359 INFO ********* Epoch==2 early stop *********
+2024-02-22 13:33:57,530 P1566359 INFO Training finished.
+2024-02-22 13:33:57,531 P1566359 INFO Load best model: /cache/FuxiCTR/benchmark/checkpoints/avazu_x4_001_a31210da/FinalNet_avazu_x4_001_015_4b405413.model
+2024-02-22 13:33:57,720 P1566359 INFO ****** Validation evaluation ******
+2024-02-22 13:34:13,540 P1566359 INFO [Metrics] AUC: 0.793961 - logloss: 0.371360
+2024-02-22 13:34:13,621 P1566359 INFO ******** Test evaluation ********
+2024-02-22 13:34:13,621 P1566359 INFO Loading datasets...
+2024-02-22 13:34:16,143 P1566359 INFO Test samples: total/4042898, blocks/1
+2024-02-22 13:34:16,143 P1566359 INFO Loading test data done.
+2024-02-22 13:34:31,959 P1566359 INFO [Metrics] AUC: 0.794116 - logloss: 0.371254
+
+```
diff --git a/ranking/ctr/FinalNet/FinalNet_avazu_x4_001/environments.txt b/ranking/ctr/FinalNet/FinalNet_avazu_x4_001/environments.txt
new file mode 100644
index 00000000..5415575c
--- /dev/null
+++ b/ranking/ctr/FinalNet/FinalNet_avazu_x4_001/environments.txt
@@ -0,0 +1,18 @@
+[Hardware]
+CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
+GPU: Tesla V100 32G
+RAM: 755G
+
+[Software]
+CUDA: 10.2
+python: 3.7.10
+pytorch: 1.10.2+cu102
+pandas: 1.1.5
+numpy: 1.19.5
+scipy: 1.5.2
+sklearn: 0.22.1
+pyyaml: 6.0.1
+h5py: 2.8.0
+tqdm: 4.64.0
+keras_preprocessing: 1.1.2
+fuxictr: 2.2.0
diff --git a/ranking/ctr/FinalNet/FinalNet_avazu_x4_001/results.csv b/ranking/ctr/FinalNet/FinalNet_avazu_x4_001/results.csv
new file mode 100644
index 00000000..e060c65c
--- /dev/null
+++ b/ranking/ctr/FinalNet/FinalNet_avazu_x4_001/results.csv
@@ -0,0 +1 @@
+ 20240222-133431,[command] python run_expid.py --config Avazu_x4/FinalNet_avazu_x4_001/FinalNet_avazu_x4_tuner_config_04 --expid FinalNet_avazu_x4_001_015_4b405413 --gpu 6,[exp_id] FinalNet_avazu_x4_001_015_4b405413,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793961 - logloss: 0.371360,[test] AUC: 0.794116 - logloss: 0.371254
diff --git a/ranking/ctr/FinalNet/FinalNet_criteo_x1/FinalNet_1B_criteo_x1/README.md b/ranking/ctr/FinalNet/FinalNet_criteo_x1/FinalNet_1B_criteo_x1/README.md
index ed782867..f0cee51a 100644
--- a/ranking/ctr/FinalNet/FinalNet_criteo_x1/FinalNet_1B_criteo_x1/README.md
+++ b/ranking/ctr/FinalNet/FinalNet_criteo_x1/FinalNet_1B_criteo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FINAL model on the Criteo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -39,11 +39,11 @@ Please refer to the BARS dataset [Criteo_x1](https://github.com/openbenchmark/BA
### Code
-We use the [FINAL](https://github.com/xue-pai/FuxiCTR/blob/v2.0.2/model_zoo/FINAL) model code from [FuxiCTR-v2.0.2](https://github.com/xue-pai/FuxiCTR/tree/v2.0.2) for this experiment.
+We use the [FINAL](https://github.com/reczoo/FuxiCTR/blob/v2.0.2/model_zoo/FINAL) model code from [FuxiCTR-v2.0.2](https://github.com/reczoo/FuxiCTR/tree/v2.0.2) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.2.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.2.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/FinalNet/FinalNet_criteo_x1/FinalNet_2B_criteo_x1/README.md b/ranking/ctr/FinalNet/FinalNet_criteo_x1/FinalNet_2B_criteo_x1/README.md
index d41295c8..0826ffde 100644
--- a/ranking/ctr/FinalNet/FinalNet_criteo_x1/FinalNet_2B_criteo_x1/README.md
+++ b/ranking/ctr/FinalNet/FinalNet_criteo_x1/FinalNet_2B_criteo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FINAL model on the Criteo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -39,11 +39,11 @@ Please refer to the BARS dataset [Criteo_x1](https://github.com/openbenchmark/BA
### Code
-We use the [FINAL](https://github.com/xue-pai/FuxiCTR/blob/v2.0.2/model_zoo/FINAL) model code from [FuxiCTR-v2.0.2](https://github.com/xue-pai/FuxiCTR/tree/v2.0.2) for this experiment.
+We use the [FINAL](https://github.com/reczoo/FuxiCTR/blob/v2.0.2/model_zoo/FINAL) model code from [FuxiCTR-v2.0.2](https://github.com/reczoo/FuxiCTR/tree/v2.0.2) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.2.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.2.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/FinalNet/FinalNet_criteo_x4_001/FinalNet_criteo_x4_001_041_449ccb21.log b/ranking/ctr/FinalNet/FinalNet_criteo_x4_001/FinalNet_criteo_x4_001_041_449ccb21.log
new file mode 100644
index 00000000..a7f50073
--- /dev/null
+++ b/ranking/ctr/FinalNet/FinalNet_criteo_x4_001/FinalNet_criteo_x4_001_041_449ccb21.log
@@ -0,0 +1,188 @@
+2024-02-21 01:09:14,380 P2661590 INFO Params: {
+ "batch_norm": "True",
+ "batch_size": "8192",
+ "block1_dropout": "0.4",
+ "block1_hidden_activations": "ReLU",
+ "block1_hidden_units": "[1000, 1000, 1000]",
+ "block2_dropout": "0.4",
+ "block2_hidden_activations": "ReLU",
+ "block2_hidden_units": "[512]",
+ "block_type": "2B",
+ "data_format": "csv",
+ "data_root": "../data/Criteo/",
+ "dataset_id": "criteo_x4_001_a5e05ce7",
+ "debug_mode": "False",
+ "early_stop_patience": "2",
+ "embedding_dim": "16",
+ "embedding_regularizer": "1e-05",
+ "epochs": "100",
+ "eval_steps": "None",
+ "feature_cols": "[{'active': True, 'dtype': 'float', 'fill_na': 0, 'name': ['I1', 'I2', 'I3', 'I4', 'I5', 'I6', 'I7', 'I8', 'I9', 'I10', 'I11', 'I12', 'I13'], 'preprocess': 'convert_to_bucket', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'fill_na': '', 'name': ['C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10', 'C11', 'C12', 'C13', 'C14', 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21', 'C22', 'C23', 'C24', 'C25', 'C26'], 'type': 'categorical'}]",
+ "feature_specs": "None",
+ "gpu": "0",
+ "group_id": "None",
+ "label_col": "{'dtype': 'float', 'name': 'Label'}",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "metrics": "['AUC', 'logloss']",
+ "min_categr_count": "10",
+ "model": "FinalNet",
+ "model_id": "FinalNet_criteo_x4_001_041_449ccb21",
+ "model_root": "./checkpoints/",
+ "monitor": "AUC",
+ "monitor_mode": "max",
+ "net_regularizer": "0",
+ "num_workers": "3",
+ "optimizer": "adam",
+ "ordered_features": "None",
+ "pickle_feature_encoder": "True",
+ "residual_type": "concat",
+ "save_best_only": "True",
+ "seed": "2019",
+ "shuffle": "True",
+ "task": "binary_classification",
+ "test_data": "../data/Criteo/Criteo_x4/test.csv",
+ "train_data": "../data/Criteo/Criteo_x4/train.csv",
+ "use_feature_gating": "True",
+ "valid_data": "../data/Criteo/Criteo_x4/valid.csv",
+ "verbose": "1"
+}
+2024-02-21 01:09:14,381 P2661590 INFO Set up feature processor...
+2024-02-21 01:09:14,382 P2661590 WARNING Skip rebuilding ../data/Criteo/criteo_x4_001_a5e05ce7/feature_map.json. Please delete it manually if rebuilding is required.
+2024-02-21 01:09:14,382 P2661590 INFO Load feature_map from json: ../data/Criteo/criteo_x4_001_a5e05ce7/feature_map.json
+2024-02-21 01:09:14,382 P2661590 INFO Set column index...
+2024-02-21 01:09:14,383 P2661590 INFO Feature specs: {
+ "C1": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 1446}",
+ "C10": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 39530}",
+ "C11": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 5131}",
+ "C12": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 156656}",
+ "C13": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 3176}",
+ "C14": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 28}",
+ "C15": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 11043}",
+ "C16": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 148913}",
+ "C17": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 12}",
+ "C18": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 4560}",
+ "C19": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 2003}",
+ "C2": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 554}",
+ "C20": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 5}",
+ "C21": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 154564}",
+ "C22": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 18}",
+ "C23": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 17}",
+ "C24": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 53031}",
+ "C25": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 82}",
+ "C26": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 40955}",
+ "C3": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 157339}",
+ "C4": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 117822}",
+ "C5": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 306}",
+ "C6": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 18}",
+ "C7": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 11882}",
+ "C8": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 630}",
+ "C9": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 5}",
+ "I1": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 44}",
+ "I10": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 6}",
+ "I11": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 27}",
+ "I12": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 37}",
+ "I13": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 72}",
+ "I2": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 99}",
+ "I3": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 122}",
+ "I4": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 41}",
+ "I5": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 220}",
+ "I6": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 112}",
+ "I7": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 80}",
+ "I8": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 69}",
+ "I9": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 92}"
+}
+2024-02-21 01:09:18,212 P2661590 INFO Total number of parameters: 18153050.
+2024-02-21 01:09:18,212 P2661590 INFO Loading datasets...
+2024-02-21 01:09:54,793 P2661590 INFO Train samples: total/36672493, blocks/1
+2024-02-21 01:09:59,444 P2661590 INFO Validation samples: total/4584062, blocks/1
+2024-02-21 01:09:59,444 P2661590 INFO Loading train and validation data done.
+2024-02-21 01:09:59,444 P2661590 INFO Start training: 4477 batches/epoch
+2024-02-21 01:09:59,444 P2661590 INFO ************ Epoch=1 start ************
+2024-02-21 01:14:51,100 P2661590 INFO Train loss: 0.460479
+2024-02-21 01:14:51,100 P2661590 INFO Evaluation @epoch 1 - batch 4477:
+2024-02-21 01:15:05,073 P2661590 INFO [Metrics] AUC: 0.805437
+2024-02-21 01:15:05,074 P2661590 INFO Save best model: monitor(max)=0.805437
+2024-02-21 01:15:05,251 P2661590 INFO ************ Epoch=1 end ************
+2024-02-21 01:19:58,419 P2661590 INFO Train loss: 0.454513
+2024-02-21 01:19:58,419 P2661590 INFO Evaluation @epoch 2 - batch 4477:
+2024-02-21 01:20:12,691 P2661590 INFO [Metrics] AUC: 0.807590
+2024-02-21 01:20:12,692 P2661590 INFO Save best model: monitor(max)=0.807590
+2024-02-21 01:20:12,879 P2661590 INFO ************ Epoch=2 end ************
+2024-02-21 01:25:03,483 P2661590 INFO Train loss: 0.453117
+2024-02-21 01:25:03,484 P2661590 INFO Evaluation @epoch 3 - batch 4477:
+2024-02-21 01:25:17,838 P2661590 INFO [Metrics] AUC: 0.808788
+2024-02-21 01:25:17,840 P2661590 INFO Save best model: monitor(max)=0.808788
+2024-02-21 01:25:18,032 P2661590 INFO ************ Epoch=3 end ************
+2024-02-21 01:30:09,428 P2661590 INFO Train loss: 0.452215
+2024-02-21 01:30:09,429 P2661590 INFO Evaluation @epoch 4 - batch 4477:
+2024-02-21 01:30:23,782 P2661590 INFO [Metrics] AUC: 0.809404
+2024-02-21 01:30:23,783 P2661590 INFO Save best model: monitor(max)=0.809404
+2024-02-21 01:30:23,982 P2661590 INFO ************ Epoch=4 end ************
+2024-02-21 01:35:14,773 P2661590 INFO Train loss: 0.451528
+2024-02-21 01:35:14,773 P2661590 INFO Evaluation @epoch 5 - batch 4477:
+2024-02-21 01:35:29,205 P2661590 INFO [Metrics] AUC: 0.809996
+2024-02-21 01:35:29,207 P2661590 INFO Save best model: monitor(max)=0.809996
+2024-02-21 01:35:29,392 P2661590 INFO ************ Epoch=5 end ************
+2024-02-21 01:40:23,698 P2661590 INFO Train loss: 0.450978
+2024-02-21 01:40:23,699 P2661590 INFO Evaluation @epoch 6 - batch 4477:
+2024-02-21 01:40:37,685 P2661590 INFO [Metrics] AUC: 0.810492
+2024-02-21 01:40:37,690 P2661590 INFO Save best model: monitor(max)=0.810492
+2024-02-21 01:40:37,886 P2661590 INFO ************ Epoch=6 end ************
+2024-02-21 01:45:31,541 P2661590 INFO Train loss: 0.450588
+2024-02-21 01:45:31,542 P2661590 INFO Evaluation @epoch 7 - batch 4477:
+2024-02-21 01:45:45,835 P2661590 INFO [Metrics] AUC: 0.810681
+2024-02-21 01:45:45,836 P2661590 INFO Save best model: monitor(max)=0.810681
+2024-02-21 01:45:46,015 P2661590 INFO ************ Epoch=7 end ************
+2024-02-21 01:50:42,165 P2661590 INFO Train loss: 0.450212
+2024-02-21 01:50:42,165 P2661590 INFO Evaluation @epoch 8 - batch 4477:
+2024-02-21 01:50:56,202 P2661590 INFO [Metrics] AUC: 0.811053
+2024-02-21 01:50:56,206 P2661590 INFO Save best model: monitor(max)=0.811053
+2024-02-21 01:50:56,395 P2661590 INFO ************ Epoch=8 end ************
+2024-02-21 01:55:51,990 P2661590 INFO Train loss: 0.449913
+2024-02-21 01:55:51,990 P2661590 INFO Evaluation @epoch 9 - batch 4477:
+2024-02-21 01:56:05,814 P2661590 INFO [Metrics] AUC: 0.811255
+2024-02-21 01:56:05,815 P2661590 INFO Save best model: monitor(max)=0.811255
+2024-02-21 01:56:06,019 P2661590 INFO ************ Epoch=9 end ************
+2024-02-21 02:01:03,880 P2661590 INFO Train loss: 0.449685
+2024-02-21 02:01:03,881 P2661590 INFO Evaluation @epoch 10 - batch 4477:
+2024-02-21 02:01:17,759 P2661590 INFO [Metrics] AUC: 0.811483
+2024-02-21 02:01:17,761 P2661590 INFO Save best model: monitor(max)=0.811483
+2024-02-21 02:01:17,949 P2661590 INFO ************ Epoch=10 end ************
+2024-02-21 02:06:12,912 P2661590 INFO Train loss: 0.449436
+2024-02-21 02:06:12,913 P2661590 INFO Evaluation @epoch 11 - batch 4477:
+2024-02-21 02:06:26,670 P2661590 INFO [Metrics] AUC: 0.811428
+2024-02-21 02:06:26,671 P2661590 INFO Monitor(max)=0.811428 STOP!
+2024-02-21 02:06:26,671 P2661590 INFO Reduce learning rate on plateau: 0.000100
+2024-02-21 02:06:26,744 P2661590 INFO ************ Epoch=11 end ************
+2024-02-21 02:11:23,197 P2661590 INFO Train loss: 0.439517
+2024-02-21 02:11:23,197 P2661590 INFO Evaluation @epoch 12 - batch 4477:
+2024-02-21 02:11:36,759 P2661590 INFO [Metrics] AUC: 0.814074
+2024-02-21 02:11:36,761 P2661590 INFO Save best model: monitor(max)=0.814074
+2024-02-21 02:11:36,947 P2661590 INFO ************ Epoch=12 end ************
+2024-02-21 02:16:33,347 P2661590 INFO Train loss: 0.435572
+2024-02-21 02:16:33,347 P2661590 INFO Evaluation @epoch 13 - batch 4477:
+2024-02-21 02:16:47,262 P2661590 INFO [Metrics] AUC: 0.814462
+2024-02-21 02:16:47,263 P2661590 INFO Save best model: monitor(max)=0.814462
+2024-02-21 02:16:47,448 P2661590 INFO ************ Epoch=13 end ************
+2024-02-21 02:21:42,706 P2661590 INFO Train loss: 0.433620
+2024-02-21 02:21:42,707 P2661590 INFO Evaluation @epoch 14 - batch 4477:
+2024-02-21 02:21:56,657 P2661590 INFO [Metrics] AUC: 0.814439
+2024-02-21 02:21:56,662 P2661590 INFO Monitor(max)=0.814439 STOP!
+2024-02-21 02:21:56,662 P2661590 INFO Reduce learning rate on plateau: 0.000010
+2024-02-21 02:21:56,737 P2661590 INFO ************ Epoch=14 end ************
+2024-02-21 02:26:51,667 P2661590 INFO Train loss: 0.430005
+2024-02-21 02:26:51,668 P2661590 INFO Evaluation @epoch 15 - batch 4477:
+2024-02-21 02:27:05,795 P2661590 INFO [Metrics] AUC: 0.814147
+2024-02-21 02:27:05,796 P2661590 INFO Monitor(max)=0.814147 STOP!
+2024-02-21 02:27:05,796 P2661590 INFO Reduce learning rate on plateau: 0.000001
+2024-02-21 02:27:05,796 P2661590 INFO ********* Epoch==15 early stop *********
+2024-02-21 02:27:05,869 P2661590 INFO Training finished.
+2024-02-21 02:27:05,869 P2661590 INFO Load best model: /cache/FuxiCTR/benchmark/checkpoints/criteo_x4_001_a5e05ce7/FinalNet_criteo_x4_001_041_449ccb21.model
+2024-02-21 02:27:05,947 P2661590 INFO ****** Validation evaluation ******
+2024-02-21 02:27:21,204 P2661590 INFO [Metrics] AUC: 0.814462 - logloss: 0.437531
+2024-02-21 02:27:21,319 P2661590 INFO ******** Test evaluation ********
+2024-02-21 02:27:21,320 P2661590 INFO Loading datasets...
+2024-02-21 02:27:25,802 P2661590 INFO Test samples: total/4584062, blocks/1
+2024-02-21 02:27:25,802 P2661590 INFO Loading test data done.
+2024-02-21 02:27:41,261 P2661590 INFO [Metrics] AUC: 0.814966 - logloss: 0.437116
diff --git a/ranking/ctr/FinalNet/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05.csv b/ranking/ctr/FinalNet/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05.csv
new file mode 100644
index 00000000..c1e2be1c
--- /dev/null
+++ b/ranking/ctr/FinalNet/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05.csv
@@ -0,0 +1,72 @@
+ 20240221-022741,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_041_449ccb21 --gpu 0,[exp_id] FinalNet_criteo_x4_001_041_449ccb21,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814462 - logloss: 0.437531,[test] AUC: 0.814966 - logloss: 0.437116
+ 20240220-231237,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_019_0ac6a14c --gpu 0,[exp_id] FinalNet_criteo_x4_001_019_0ac6a14c,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814530 - logloss: 0.437608,[test] AUC: 0.814965 - logloss: 0.437215
+ 20240221-015632,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_037_d5f841f3 --gpu 1,[exp_id] FinalNet_criteo_x4_001_037_d5f841f3,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814423 - logloss: 0.437743,[test] AUC: 0.814913 - logloss: 0.437300
+ 20240221-052604,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_059_b158140a --gpu 3,[exp_id] FinalNet_criteo_x4_001_059_b158140a,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814380 - logloss: 0.437776,[test] AUC: 0.814874 - logloss: 0.437317
+ 20240220-214521,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_012_6f1e00cf --gpu 1,[exp_id] FinalNet_criteo_x4_001_012_6f1e00cf,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814355 - logloss: 0.437823,[test] AUC: 0.814858 - logloss: 0.437380
+ 20240221-034053,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_047_69244e2b --gpu 6,[exp_id] FinalNet_criteo_x4_001_047_69244e2b,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814356 - logloss: 0.437646,[test] AUC: 0.814847 - logloss: 0.437213
+ 20240220-202621,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_001_9e6a2ff9 --gpu 0,[exp_id] FinalNet_criteo_x4_001_001_9e6a2ff9,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814366 - logloss: 0.437677,[test] AUC: 0.814830 - logloss: 0.437297
+ 20240221-001009,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_029_4559df1f --gpu 4,[exp_id] FinalNet_criteo_x4_001_029_4559df1f,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814388 - logloss: 0.437623,[test] AUC: 0.814814 - logloss: 0.437272
+ 20240220-203741,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_002_e781ad47 --gpu 1,[exp_id] FinalNet_criteo_x4_001_002_e781ad47,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814344 - logloss: 0.437788,[test] AUC: 0.814811 - logloss: 0.437388
+ 20240221-022720,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_042_69244e2b --gpu 7,[exp_id] FinalNet_criteo_x4_001_042_69244e2b,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814295 - logloss: 0.437781,[test] AUC: 0.814799 - logloss: 0.437341
+ 20240220-204505,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_005_618d8b09 --gpu 4,[exp_id] FinalNet_criteo_x4_001_005_618d8b09,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814315 - logloss: 0.437760,[test] AUC: 0.814798 - logloss: 0.437342
+ 20240221-063259,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_065_448e43dd --gpu 7,[exp_id] FinalNet_criteo_x4_001_065_448e43dd,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814387 - logloss: 0.437674,[test] AUC: 0.814778 - logloss: 0.437345
+ 20240221-001441,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_027_3b0eb4b5 --gpu 1,[exp_id] FinalNet_criteo_x4_001_027_3b0eb4b5,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814404 - logloss: 0.437656,[test] AUC: 0.814776 - logloss: 0.437328
+ 20240221-000951,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_028_d8e0492d --gpu 3,[exp_id] FinalNet_criteo_x4_001_028_d8e0492d,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814268 - logloss: 0.437757,[test] AUC: 0.814760 - logloss: 0.437312
+ 20240220-230814,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_022_3b0eb4b5 --gpu 3,[exp_id] FinalNet_criteo_x4_001_022_3b0eb4b5,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814378 - logloss: 0.437641,[test] AUC: 0.814756 - logloss: 0.437319
+ 20240221-001328,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_030_ff457aab --gpu 0,[exp_id] FinalNet_criteo_x4_001_030_ff457aab,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814332 - logloss: 0.437766,[test] AUC: 0.814747 - logloss: 0.437392
+ 20240221-001259,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_025_263a12f0 --gpu 7,[exp_id] FinalNet_criteo_x4_001_025_263a12f0,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814378 - logloss: 0.437858,[test] AUC: 0.814732 - logloss: 0.437524
+ 20240220-230939,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_023_ac53fa38 --gpu 4,[exp_id] FinalNet_criteo_x4_001_023_ac53fa38,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814265 - logloss: 0.437776,[test] AUC: 0.814729 - logloss: 0.437398
+ 20240220-203834,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_007_e781ad47 --gpu 6,[exp_id] FinalNet_criteo_x4_001_007_e781ad47,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814300 - logloss: 0.437808,[test] AUC: 0.814722 - logloss: 0.437444
+ 20240220-230023,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_021_030cd8fd --gpu 5,[exp_id] FinalNet_criteo_x4_001_021_030cd8fd,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814331 - logloss: 0.437651,[test] AUC: 0.814713 - logloss: 0.437323
+ 20240220-215202,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_013_41ddbb28 --gpu 6,[exp_id] FinalNet_criteo_x4_001_013_41ddbb28,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814246 - logloss: 0.437941,[test] AUC: 0.814701 - logloss: 0.437523
+ 20240221-052248,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_060_448e43dd --gpu 2,[exp_id] FinalNet_criteo_x4_001_060_448e43dd,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814188 - logloss: 0.437830,[test] AUC: 0.814695 - logloss: 0.437379
+ 20240221-001700,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_026_ad3e257e --gpu 5,[exp_id] FinalNet_criteo_x4_001_026_ad3e257e,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814246 - logloss: 0.437809,[test] AUC: 0.814686 - logloss: 0.437442
+ 20240221-052158,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_058_5316aa02 --gpu 4,[exp_id] FinalNet_criteo_x4_001_058_5316aa02,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814245 - logloss: 0.437844,[test] AUC: 0.814676 - logloss: 0.437477
+ 20240220-214134,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_010_545829a0 --gpu 7,[exp_id] FinalNet_criteo_x4_001_010_545829a0,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814241 - logloss: 0.437861,[test] AUC: 0.814668 - logloss: 0.437470
+ 20240220-220629,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_011_c3a8223a --gpu 3,[exp_id] FinalNet_criteo_x4_001_011_c3a8223a,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814174 - logloss: 0.438066,[test] AUC: 0.814662 - logloss: 0.437635
+ 20240220-231901,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_020_263a12f0 --gpu 6,[exp_id] FinalNet_criteo_x4_001_020_263a12f0,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814215 - logloss: 0.437867,[test] AUC: 0.814661 - logloss: 0.437485
+ 20240220-215317,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_014_19d597d6 --gpu 5,[exp_id] FinalNet_criteo_x4_001_014_19d597d6,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814201 - logloss: 0.438066,[test] AUC: 0.814659 - logloss: 0.437653
+ 20240221-015845,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_038_07156d5a --gpu 5,[exp_id] FinalNet_criteo_x4_001_038_07156d5a,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814291 - logloss: 0.437986,[test] AUC: 0.814657 - logloss: 0.437664
+ 20240221-025303,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_043_07156d5a --gpu 4,[exp_id] FinalNet_criteo_x4_001_043_07156d5a,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814208 - logloss: 0.437986,[test] AUC: 0.814622 - logloss: 0.437609
+ 20240221-034558,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_048_ee89ec2e --gpu 7,[exp_id] FinalNet_criteo_x4_001_048_ee89ec2e,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814248 - logloss: 0.437948,[test] AUC: 0.814618 - logloss: 0.437611
+ 20240221-063645,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_063_5316aa02 --gpu 4,[exp_id] FinalNet_criteo_x4_001_063_5316aa02,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814254 - logloss: 0.437820,[test] AUC: 0.814611 - logloss: 0.437520
+ 20240220-203202,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_008_0d2aed8f --gpu 7,[exp_id] FinalNet_criteo_x4_001_008_0d2aed8f,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814224 - logloss: 0.437997,[test] AUC: 0.814609 - logloss: 0.437654
+ 20240221-063522,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_066_22935526 --gpu 3,[exp_id] FinalNet_criteo_x4_001_066_22935526,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814163 - logloss: 0.437906,[test] AUC: 0.814605 - logloss: 0.437554
+ 20240220-204031,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_006_c3a8223a --gpu 5,[exp_id] FinalNet_criteo_x4_001_006_c3a8223a,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814116 - logloss: 0.438138,[test] AUC: 0.814598 - logloss: 0.437751
+ 20240220-233325,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_024_4559df1f --gpu 2,[exp_id] FinalNet_criteo_x4_001_024_4559df1f,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814154 - logloss: 0.438161,[test] AUC: 0.814590 - logloss: 0.437789
+ 20240221-010911,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_035_77ce592e --gpu 7,[exp_id] FinalNet_criteo_x4_001_035_77ce592e,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814157 - logloss: 0.437887,[test] AUC: 0.814590 - logloss: 0.437514
+ 20240221-040637,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_051_125107ff --gpu 4,[exp_id] FinalNet_criteo_x4_001_051_125107ff,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814148 - logloss: 0.437954,[test] AUC: 0.814587 - logloss: 0.437559
+ 20240221-011201,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_033_6d65297a --gpu 3,[exp_id] FinalNet_criteo_x4_001_033_6d65297a,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814184 - logloss: 0.437900,[test] AUC: 0.814577 - logloss: 0.437572
+ 20240220-203243,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_004_360523e9 --gpu 3,[exp_id] FinalNet_criteo_x4_001_004_360523e9,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814154 - logloss: 0.437915,[test] AUC: 0.814575 - logloss: 0.437546
+ 20240220-215028,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_009_360523e9 --gpu 0,[exp_id] FinalNet_criteo_x4_001_009_360523e9,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814081 - logloss: 0.437992,[test] AUC: 0.814545 - logloss: 0.437574
+ 20240221-061859,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_061_8ee1575b --gpu 5,[exp_id] FinalNet_criteo_x4_001_061_8ee1575b,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814111 - logloss: 0.438220,[test] AUC: 0.814541 - logloss: 0.437841
+ 20240220-225106,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_017_1ae1bc89 --gpu 7,[exp_id] FinalNet_criteo_x4_001_017_1ae1bc89,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814017 - logloss: 0.438208,[test] AUC: 0.814538 - logloss: 0.437719
+ 20240220-205345,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_003_1d006877 --gpu 2,[exp_id] FinalNet_criteo_x4_001_003_1d006877,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814058 - logloss: 0.438014,[test] AUC: 0.814536 - logloss: 0.437618
+ 20240221-003540,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_031_e4e013ce --gpu 6,[exp_id] FinalNet_criteo_x4_001_031_e4e013ce,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814084 - logloss: 0.438059,[test] AUC: 0.814532 - logloss: 0.437669
+ 20240221-010909,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_036_7acf3ced --gpu 0,[exp_id] FinalNet_criteo_x4_001_036_7acf3ced,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814143 - logloss: 0.437917,[test] AUC: 0.814510 - logloss: 0.437617
+ 20240221-054228,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_057_e96640a5 --gpu 0,[exp_id] FinalNet_criteo_x4_001_057_e96640a5,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814141 - logloss: 0.437988,[test] AUC: 0.814502 - logloss: 0.437642
+ 20240221-011133,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_034_9661d73e --gpu 4,[exp_id] FinalNet_criteo_x4_001_034_9661d73e,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814019 - logloss: 0.438173,[test] AUC: 0.814485 - logloss: 0.437737
+ 20240221-052354,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_056_8ee1575b --gpu 7,[exp_id] FinalNet_criteo_x4_001_056_8ee1575b,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814018 - logloss: 0.438206,[test] AUC: 0.814483 - logloss: 0.437775
+ 20240221-053528,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_055_dc1bc1cc --gpu 6,[exp_id] FinalNet_criteo_x4_001_055_dc1bc1cc,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814050 - logloss: 0.438183,[test] AUC: 0.814473 - logloss: 0.437807
+ 20240221-025409,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_044_740eea70 --gpu 3,[exp_id] FinalNet_criteo_x4_001_044_740eea70,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814055 - logloss: 0.438276,[test] AUC: 0.814442 - logloss: 0.437958
+ 20240221-032309,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_046_c3fd391f --gpu 5,[exp_id] FinalNet_criteo_x4_001_046_c3fd391f,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813998 - logloss: 0.438053,[test] AUC: 0.814410 - logloss: 0.437708
+ 20240221-022235,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_039_0c16b3de --gpu 6,[exp_id] FinalNet_criteo_x4_001_039_0c16b3de,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813996 - logloss: 0.438025,[test] AUC: 0.814409 - logloss: 0.437667
+ 20240221-074245,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_070_58816d96 --gpu 1,[exp_id] FinalNet_criteo_x4_001_070_58816d96,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813870 - logloss: 0.438294,[test] AUC: 0.814375 - logloss: 0.437823
+ 20240221-040716,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_052_e8c879f1 --gpu 3,[exp_id] FinalNet_criteo_x4_001_052_e8c879f1,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813937 - logloss: 0.438185,[test] AUC: 0.814373 - logloss: 0.437797
+ 20240221-065536,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_067_f375b4ca --gpu 6,[exp_id] FinalNet_criteo_x4_001_067_f375b4ca,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813996 - logloss: 0.438174,[test] AUC: 0.814366 - logloss: 0.437838
+ 20240221-005040,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_032_f9c21974 --gpu 2,[exp_id] FinalNet_criteo_x4_001_032_f9c21974,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813870 - logloss: 0.438343,[test] AUC: 0.814298 - logloss: 0.437978
+ 20240221-033737,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_045_4a41a3c8 --gpu 1,[exp_id] FinalNet_criteo_x4_001_045_4a41a3c8,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813846 - logloss: 0.438255,[test] AUC: 0.814293 - logloss: 0.437866
+ 20240221-044149,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_053_c1ebd495 --gpu 5,[exp_id] FinalNet_criteo_x4_001_053_c1ebd495,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813678 - logloss: 0.438515,[test] AUC: 0.814268 - logloss: 0.437991
+ 20240221-063256,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_062_529f1ae4 --gpu 1,[exp_id] FinalNet_criteo_x4_001_062_529f1ae4,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813740 - logloss: 0.438669,[test] AUC: 0.814245 - logloss: 0.438184
+ 20240221-074112,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_071_0fd8239b --gpu 7,[exp_id] FinalNet_criteo_x4_001_071_0fd8239b,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813833 - logloss: 0.438404,[test] AUC: 0.814210 - logloss: 0.438081
+ 20240221-023758,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_040_4a41a3c8 --gpu 2,[exp_id] FinalNet_criteo_x4_001_040_4a41a3c8,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813789 - logloss: 0.438363,[test] AUC: 0.814100 - logloss: 0.438073
+ 20240221-074406,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_072_e2420cee --gpu 3,[exp_id] FinalNet_criteo_x4_001_072_e2420cee,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813720 - logloss: 0.438488,[test] AUC: 0.814076 - logloss: 0.438204
+ 20240221-045553,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_054_a74b39c1 --gpu 1,[exp_id] FinalNet_criteo_x4_001_054_a74b39c1,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813633 - logloss: 0.438630,[test] AUC: 0.814073 - logloss: 0.438227
+ 20240221-073402,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_069_4159cf80 --gpu 5,[exp_id] FinalNet_criteo_x4_001_069_4159cf80,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813658 - logloss: 0.438559,[test] AUC: 0.814066 - logloss: 0.438155
+ 20240220-220946,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_015_9ea954fe --gpu 4,[exp_id] FinalNet_criteo_x4_001_015_9ea954fe,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813631 - logloss: 0.438569,[test] AUC: 0.814056 - logloss: 0.438206
+ 20240221-040147,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_049_dcc13f68 --gpu 0,[exp_id] FinalNet_criteo_x4_001_049_dcc13f68,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813558 - logloss: 0.438654,[test] AUC: 0.813986 - logloss: 0.438233
+ 20240220-230811,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_018_e466ac5f --gpu 1,[exp_id] FinalNet_criteo_x4_001_018_e466ac5f,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813507 - logloss: 0.438915,[test] AUC: 0.813898 - logloss: 0.438540
+ 20240221-064829,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_064_cc7fc3f9 --gpu 2,[exp_id] FinalNet_criteo_x4_001_064_cc7fc3f9,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813403 - logloss: 0.438620,[test] AUC: 0.813898 - logloss: 0.438174
+ 20240221-041308,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_050_963a7525 --gpu 2,[exp_id] FinalNet_criteo_x4_001_050_963a7525,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813317 - logloss: 0.438878,[test] AUC: 0.813830 - logloss: 0.438401
+ 20240220-221903,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_016_38ed780e --gpu 2,[exp_id] FinalNet_criteo_x4_001_016_38ed780e,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813382 - logloss: 0.438941,[test] AUC: 0.813803 - logloss: 0.438595
+ 20240221-070801,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_068_326aa070 --gpu 0,[exp_id] FinalNet_criteo_x4_001_068_326aa070,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.813459 - logloss: 0.438957,[test] AUC: 0.813796 - logloss: 0.438661
diff --git a/ranking/ctr/FinalNet/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05.yaml b/ranking/ctr/FinalNet/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05.yaml
new file mode 100644
index 00000000..3fc0d07c
--- /dev/null
+++ b/ranking/ctr/FinalNet/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05.yaml
@@ -0,0 +1,46 @@
+base_config: ../model_zoo/FinalNet/config/
+base_expid: FinalNet_default
+dataset_id: criteo_x4_001
+
+dataset_config:
+ criteo_x4_001:
+ data_format: csv
+ data_root: ../data/Criteo/
+ feature_cols:
+ - active: true
+ dtype: float
+ fill_na: 0
+ name: [I1, I2, I3, I4, I5, I6, I7, I8, I9, I10, I11, I12, I13]
+ preprocess: convert_to_bucket
+ type: categorical
+ - active: true
+ dtype: str
+ fill_na: ''
+ name: [C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16,
+ C17, C18, C19, C20, C21, C22, C23, C24, C25, C26]
+ type: categorical
+ label_col: {dtype: float, name: Label}
+ min_categr_count: 10
+ test_data: ../data/Criteo/Criteo_x4/test.csv
+ train_data: ../data/Criteo/Criteo_x4/train.csv
+ valid_data: ../data/Criteo/Criteo_x4/valid.csv
+
+tuner_space:
+ model_root: './checkpoints/'
+ embedding_dim: 16
+ embedding_regularizer: 1.e-5
+ block_type: "2B"
+ batch_norm: True
+ use_feature_gating: [False, True]
+ block1_hidden_units: [[1000, 1000, 1000], [1024, 512, 256]]
+ block1_hidden_activations: ReLU
+ block1_dropout: [0.4, 0.3, 0.2]
+ block2_hidden_units: [[1000, 1000, 1000], [1024, 512], [512]]
+ block2_hidden_activations: ReLU
+ block2_dropout: [0.4, 0.3]
+ learning_rate: 1.e-3
+ batch_size: 8192
+ seed: 2019
+ monitor: 'AUC'
+ monitor_mode: 'max'
+ metrics: [['AUC', 'logloss']]
diff --git a/ranking/ctr/FinalNet/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05/dataset_config.yaml b/ranking/ctr/FinalNet/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05/dataset_config.yaml
new file mode 100644
index 00000000..73e334c1
--- /dev/null
+++ b/ranking/ctr/FinalNet/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05/dataset_config.yaml
@@ -0,0 +1,21 @@
+criteo_x4_001_a5e05ce7:
+ data_format: csv
+ data_root: ../data/Criteo/
+ feature_cols:
+ - active: true
+ dtype: float
+ fill_na: 0
+ name: [I1, I2, I3, I4, I5, I6, I7, I8, I9, I10, I11, I12, I13]
+ preprocess: convert_to_bucket
+ type: categorical
+ - active: true
+ dtype: str
+ fill_na: ''
+ name: [C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16,
+ C17, C18, C19, C20, C21, C22, C23, C24, C25, C26]
+ type: categorical
+ label_col: {dtype: float, name: Label}
+ min_categr_count: 10
+ test_data: ../data/Criteo/Criteo_x4/test.csv
+ train_data: ../data/Criteo/Criteo_x4/train.csv
+ valid_data: ../data/Criteo/Criteo_x4/valid.csv
diff --git a/ranking/ctr/FinalNet/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05/model_config.yaml b/ranking/ctr/FinalNet/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05/model_config.yaml
new file mode 100644
index 00000000..a0e70cc1
--- /dev/null
+++ b/ranking/ctr/FinalNet/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05/model_config.yaml
@@ -0,0 +1,2736 @@
+FinalNet_criteo_x4_001_001_9e6a2ff9:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.4
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1000, 1000, 1000]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_002_e781ad47:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.4
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1000, 1000, 1000]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_003_1d006877:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.4
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1024, 512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_004_360523e9:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.4
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1024, 512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_005_618d8b09:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.4
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_006_c3a8223a:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.4
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_007_e781ad47:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.3
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1000, 1000, 1000]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_008_0d2aed8f:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.3
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1000, 1000, 1000]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_009_360523e9:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.3
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1024, 512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_010_545829a0:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.3
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1024, 512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_011_c3a8223a:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.3
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_012_6f1e00cf:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.3
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_013_41ddbb28:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.2
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1000, 1000, 1000]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_014_19d597d6:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.2
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1000, 1000, 1000]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_015_9ea954fe:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.2
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1024, 512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_016_38ed780e:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.2
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1024, 512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_017_1ae1bc89:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.2
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_018_e466ac5f:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.2
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_019_0ac6a14c:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.4
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1000, 1000, 1000]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_020_263a12f0:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.4
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1000, 1000, 1000]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_021_030cd8fd:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.4
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1024, 512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_022_3b0eb4b5:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.4
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1024, 512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_023_ac53fa38:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.4
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_024_4559df1f:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.4
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_025_263a12f0:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.3
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1000, 1000, 1000]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_026_ad3e257e:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.3
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1000, 1000, 1000]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_027_3b0eb4b5:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.3
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1024, 512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_028_d8e0492d:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.3
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1024, 512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_029_4559df1f:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.3
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_030_ff457aab:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.3
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_031_e4e013ce:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.2
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1000, 1000, 1000]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_032_f9c21974:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.2
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1000, 1000, 1000]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_033_6d65297a:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.2
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1024, 512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_034_9661d73e:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.2
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1024, 512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_035_77ce592e:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.2
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_036_7acf3ced:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.2
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: false
+ verbose: 1
+FinalNet_criteo_x4_001_037_d5f841f3:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.4
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1000, 1000, 1000]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_038_07156d5a:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.4
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1000, 1000, 1000]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_039_0c16b3de:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.4
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1024, 512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_040_4a41a3c8:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.4
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1024, 512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_041_449ccb21:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.4
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_042_69244e2b:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.4
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_043_07156d5a:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.3
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1000, 1000, 1000]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_044_740eea70:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.3
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1000, 1000, 1000]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_045_4a41a3c8:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.3
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1024, 512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_046_c3fd391f:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.3
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1024, 512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_047_69244e2b:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.3
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_048_ee89ec2e:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.3
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_049_dcc13f68:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.2
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1000, 1000, 1000]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_050_963a7525:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.2
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1000, 1000, 1000]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_051_125107ff:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.2
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1024, 512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_052_e8c879f1:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.2
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1024, 512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_053_c1ebd495:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.2
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_054_a74b39c1:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.2
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1000, 1000, 1000]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_055_dc1bc1cc:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.4
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1000, 1000, 1000]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_056_8ee1575b:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.4
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1000, 1000, 1000]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_057_e96640a5:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.4
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1024, 512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_058_5316aa02:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.4
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1024, 512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_059_b158140a:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.4
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_060_448e43dd:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.4
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_061_8ee1575b:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.3
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1000, 1000, 1000]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_062_529f1ae4:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.3
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1000, 1000, 1000]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_063_5316aa02:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.3
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1024, 512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_064_cc7fc3f9:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.3
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1024, 512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_065_448e43dd:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.3
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_066_22935526:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.3
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_067_f375b4ca:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.2
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1000, 1000, 1000]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_068_326aa070:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.2
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1000, 1000, 1000]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_069_4159cf80:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.2
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1024, 512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_070_58816d96:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.2
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [1024, 512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_071_0fd8239b:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.2
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.4
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
+FinalNet_criteo_x4_001_072_e2420cee:
+ batch_norm: true
+ batch_size: 8192
+ block1_dropout: 0.2
+ block1_hidden_activations: ReLU
+ block1_hidden_units: [1024, 512, 256]
+ block2_dropout: 0.3
+ block2_hidden_activations: ReLU
+ block2_hidden_units: [512]
+ block_type: 2B
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ early_stop_patience: 2
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: FinalNet
+ model_root: ./checkpoints/
+ monitor: AUC
+ monitor_mode: max
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ ordered_features: null
+ pickle_feature_encoder: true
+ residual_type: concat
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_feature_gating: true
+ verbose: 1
diff --git a/ranking/ctr/FinalNet/FinalNet_criteo_x4_001/README.md b/ranking/ctr/FinalNet/FinalNet_criteo_x4_001/README.md
new file mode 100644
index 00000000..d21da242
--- /dev/null
+++ b/ranking/ctr/FinalNet/FinalNet_criteo_x4_001/README.md
@@ -0,0 +1,263 @@
+## FinalNet_criteo_x4_001
+
+A hands-on guide to run the FinalNet model on the Criteo_x4 dataset.
+
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
+
+
+| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
+|:-----------------------------:|:-----------:|:--------:|:--------:|:-------:|
+### Environments
++ Hardware
+
+ ```python
+ CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
+ GPU: Tesla V100 32G
+ RAM: 755G
+
+ ```
+
++ Software
+
+ ```python
+ CUDA: 10.2
+ python: 3.7.10
+ pytorch: 1.10.2+cu102
+ pandas: 1.1.5
+ numpy: 1.19.5
+ scipy: 1.5.2
+ sklearn: 0.22.1
+ pyyaml: 6.0.1
+ h5py: 2.8.0
+ tqdm: 4.64.0
+ keras_preprocessing: 1.1.2
+ fuxictr: 2.2.0
+ ```
+
+### Dataset
+Please refer to [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4) to get the dataset details.
+
+### Code
+
+We use the [FinalNet](https://github.com/reczoo/FuxiCTR/tree/v2.2.0/model_zoo/FinalNet) model code from [FuxiCTR-v2.2.0](https://github.com/reczoo/FuxiCTR/tree/v2.2.0) for this experiment.
+
+Running steps:
+
+1. Download [FuxiCTR-v2.2.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.2.0.zip) and install all the dependencies listed in the [environments](#environments).
+
+ ```bash
+ pip uninstall fuxictr
+ pip install fuxictr==2.2.0
+ ```
+
+2. Create a data directory and put the downloaded data files in `../data/Criteo/Criteo_x4`.
+
+3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [FinalNet_criteo_x4_tuner_config_05](./FinalNet_criteo_x4_tuner_config_05). Please make sure that the data paths in `dataset_config.yaml` are correctly set.
+
+4. Run the following script to start training and evaluation.
+
+ ```bash
+ cd FuxiCTR/model_zoo/FinalNet
+ nohup python run_expid.py --config YOUR_PATH/FinalNet/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_041_449ccb21 --gpu 0 > run.log &
+ tail -f run.log
+ ```
+
+### Results
+
+| AUC | logloss |
+|:--------------------:|:--------------------:|
+| 0.814966 | 0.437116 |
+
+
+### Logs
+```python
+2024-02-21 01:09:14,380 P2661590 INFO Params: {
+ "batch_norm": "True",
+ "batch_size": "8192",
+ "block1_dropout": "0.4",
+ "block1_hidden_activations": "ReLU",
+ "block1_hidden_units": "[1000, 1000, 1000]",
+ "block2_dropout": "0.4",
+ "block2_hidden_activations": "ReLU",
+ "block2_hidden_units": "[512]",
+ "block_type": "2B",
+ "data_format": "csv",
+ "data_root": "../data/Criteo/",
+ "dataset_id": "criteo_x4_001_a5e05ce7",
+ "debug_mode": "False",
+ "early_stop_patience": "2",
+ "embedding_dim": "16",
+ "embedding_regularizer": "1e-05",
+ "epochs": "100",
+ "eval_steps": "None",
+ "feature_cols": "[{'active': True, 'dtype': 'float', 'fill_na': 0, 'name': ['I1', 'I2', 'I3', 'I4', 'I5', 'I6', 'I7', 'I8', 'I9', 'I10', 'I11', 'I12', 'I13'], 'preprocess': 'convert_to_bucket', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'fill_na': '', 'name': ['C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10', 'C11', 'C12', 'C13', 'C14', 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21', 'C22', 'C23', 'C24', 'C25', 'C26'], 'type': 'categorical'}]",
+ "feature_specs": "None",
+ "gpu": "0",
+ "group_id": "None",
+ "label_col": "{'dtype': 'float', 'name': 'Label'}",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "metrics": "['AUC', 'logloss']",
+ "min_categr_count": "10",
+ "model": "FinalNet",
+ "model_id": "FinalNet_criteo_x4_001_041_449ccb21",
+ "model_root": "./checkpoints/",
+ "monitor": "AUC",
+ "monitor_mode": "max",
+ "net_regularizer": "0",
+ "num_workers": "3",
+ "optimizer": "adam",
+ "ordered_features": "None",
+ "pickle_feature_encoder": "True",
+ "residual_type": "concat",
+ "save_best_only": "True",
+ "seed": "2019",
+ "shuffle": "True",
+ "task": "binary_classification",
+ "test_data": "../data/Criteo/Criteo_x4/test.csv",
+ "train_data": "../data/Criteo/Criteo_x4/train.csv",
+ "use_feature_gating": "True",
+ "valid_data": "../data/Criteo/Criteo_x4/valid.csv",
+ "verbose": "1"
+}
+2024-02-21 01:09:14,381 P2661590 INFO Set up feature processor...
+2024-02-21 01:09:14,382 P2661590 WARNING Skip rebuilding ../data/Criteo/criteo_x4_001_a5e05ce7/feature_map.json. Please delete it manually if rebuilding is required.
+2024-02-21 01:09:14,382 P2661590 INFO Load feature_map from json: ../data/Criteo/criteo_x4_001_a5e05ce7/feature_map.json
+2024-02-21 01:09:14,382 P2661590 INFO Set column index...
+2024-02-21 01:09:14,383 P2661590 INFO Feature specs: {
+ "C1": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 1446}",
+ "C10": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 39530}",
+ "C11": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 5131}",
+ "C12": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 156656}",
+ "C13": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 3176}",
+ "C14": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 28}",
+ "C15": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 11043}",
+ "C16": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 148913}",
+ "C17": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 12}",
+ "C18": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 4560}",
+ "C19": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 2003}",
+ "C2": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 554}",
+ "C20": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 5}",
+ "C21": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 154564}",
+ "C22": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 18}",
+ "C23": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 17}",
+ "C24": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 53031}",
+ "C25": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 82}",
+ "C26": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 40955}",
+ "C3": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 157339}",
+ "C4": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 117822}",
+ "C5": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 306}",
+ "C6": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 18}",
+ "C7": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 11882}",
+ "C8": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 630}",
+ "C9": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 5}",
+ "I1": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 44}",
+ "I10": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 6}",
+ "I11": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 27}",
+ "I12": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 37}",
+ "I13": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 72}",
+ "I2": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 99}",
+ "I3": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 122}",
+ "I4": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 41}",
+ "I5": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 220}",
+ "I6": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 112}",
+ "I7": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 80}",
+ "I8": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 69}",
+ "I9": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 92}"
+}
+2024-02-21 01:09:18,212 P2661590 INFO Total number of parameters: 18153050.
+2024-02-21 01:09:18,212 P2661590 INFO Loading datasets...
+2024-02-21 01:09:54,793 P2661590 INFO Train samples: total/36672493, blocks/1
+2024-02-21 01:09:59,444 P2661590 INFO Validation samples: total/4584062, blocks/1
+2024-02-21 01:09:59,444 P2661590 INFO Loading train and validation data done.
+2024-02-21 01:09:59,444 P2661590 INFO Start training: 4477 batches/epoch
+2024-02-21 01:09:59,444 P2661590 INFO ************ Epoch=1 start ************
+2024-02-21 01:14:51,100 P2661590 INFO Train loss: 0.460479
+2024-02-21 01:14:51,100 P2661590 INFO Evaluation @epoch 1 - batch 4477:
+2024-02-21 01:15:05,073 P2661590 INFO [Metrics] AUC: 0.805437
+2024-02-21 01:15:05,074 P2661590 INFO Save best model: monitor(max)=0.805437
+2024-02-21 01:15:05,251 P2661590 INFO ************ Epoch=1 end ************
+2024-02-21 01:19:58,419 P2661590 INFO Train loss: 0.454513
+2024-02-21 01:19:58,419 P2661590 INFO Evaluation @epoch 2 - batch 4477:
+2024-02-21 01:20:12,691 P2661590 INFO [Metrics] AUC: 0.807590
+2024-02-21 01:20:12,692 P2661590 INFO Save best model: monitor(max)=0.807590
+2024-02-21 01:20:12,879 P2661590 INFO ************ Epoch=2 end ************
+2024-02-21 01:25:03,483 P2661590 INFO Train loss: 0.453117
+2024-02-21 01:25:03,484 P2661590 INFO Evaluation @epoch 3 - batch 4477:
+2024-02-21 01:25:17,838 P2661590 INFO [Metrics] AUC: 0.808788
+2024-02-21 01:25:17,840 P2661590 INFO Save best model: monitor(max)=0.808788
+2024-02-21 01:25:18,032 P2661590 INFO ************ Epoch=3 end ************
+2024-02-21 01:30:09,428 P2661590 INFO Train loss: 0.452215
+2024-02-21 01:30:09,429 P2661590 INFO Evaluation @epoch 4 - batch 4477:
+2024-02-21 01:30:23,782 P2661590 INFO [Metrics] AUC: 0.809404
+2024-02-21 01:30:23,783 P2661590 INFO Save best model: monitor(max)=0.809404
+2024-02-21 01:30:23,982 P2661590 INFO ************ Epoch=4 end ************
+2024-02-21 01:35:14,773 P2661590 INFO Train loss: 0.451528
+2024-02-21 01:35:14,773 P2661590 INFO Evaluation @epoch 5 - batch 4477:
+2024-02-21 01:35:29,205 P2661590 INFO [Metrics] AUC: 0.809996
+2024-02-21 01:35:29,207 P2661590 INFO Save best model: monitor(max)=0.809996
+2024-02-21 01:35:29,392 P2661590 INFO ************ Epoch=5 end ************
+2024-02-21 01:40:23,698 P2661590 INFO Train loss: 0.450978
+2024-02-21 01:40:23,699 P2661590 INFO Evaluation @epoch 6 - batch 4477:
+2024-02-21 01:40:37,685 P2661590 INFO [Metrics] AUC: 0.810492
+2024-02-21 01:40:37,690 P2661590 INFO Save best model: monitor(max)=0.810492
+2024-02-21 01:40:37,886 P2661590 INFO ************ Epoch=6 end ************
+2024-02-21 01:45:31,541 P2661590 INFO Train loss: 0.450588
+2024-02-21 01:45:31,542 P2661590 INFO Evaluation @epoch 7 - batch 4477:
+2024-02-21 01:45:45,835 P2661590 INFO [Metrics] AUC: 0.810681
+2024-02-21 01:45:45,836 P2661590 INFO Save best model: monitor(max)=0.810681
+2024-02-21 01:45:46,015 P2661590 INFO ************ Epoch=7 end ************
+2024-02-21 01:50:42,165 P2661590 INFO Train loss: 0.450212
+2024-02-21 01:50:42,165 P2661590 INFO Evaluation @epoch 8 - batch 4477:
+2024-02-21 01:50:56,202 P2661590 INFO [Metrics] AUC: 0.811053
+2024-02-21 01:50:56,206 P2661590 INFO Save best model: monitor(max)=0.811053
+2024-02-21 01:50:56,395 P2661590 INFO ************ Epoch=8 end ************
+2024-02-21 01:55:51,990 P2661590 INFO Train loss: 0.449913
+2024-02-21 01:55:51,990 P2661590 INFO Evaluation @epoch 9 - batch 4477:
+2024-02-21 01:56:05,814 P2661590 INFO [Metrics] AUC: 0.811255
+2024-02-21 01:56:05,815 P2661590 INFO Save best model: monitor(max)=0.811255
+2024-02-21 01:56:06,019 P2661590 INFO ************ Epoch=9 end ************
+2024-02-21 02:01:03,880 P2661590 INFO Train loss: 0.449685
+2024-02-21 02:01:03,881 P2661590 INFO Evaluation @epoch 10 - batch 4477:
+2024-02-21 02:01:17,759 P2661590 INFO [Metrics] AUC: 0.811483
+2024-02-21 02:01:17,761 P2661590 INFO Save best model: monitor(max)=0.811483
+2024-02-21 02:01:17,949 P2661590 INFO ************ Epoch=10 end ************
+2024-02-21 02:06:12,912 P2661590 INFO Train loss: 0.449436
+2024-02-21 02:06:12,913 P2661590 INFO Evaluation @epoch 11 - batch 4477:
+2024-02-21 02:06:26,670 P2661590 INFO [Metrics] AUC: 0.811428
+2024-02-21 02:06:26,671 P2661590 INFO Monitor(max)=0.811428 STOP!
+2024-02-21 02:06:26,671 P2661590 INFO Reduce learning rate on plateau: 0.000100
+2024-02-21 02:06:26,744 P2661590 INFO ************ Epoch=11 end ************
+2024-02-21 02:11:23,197 P2661590 INFO Train loss: 0.439517
+2024-02-21 02:11:23,197 P2661590 INFO Evaluation @epoch 12 - batch 4477:
+2024-02-21 02:11:36,759 P2661590 INFO [Metrics] AUC: 0.814074
+2024-02-21 02:11:36,761 P2661590 INFO Save best model: monitor(max)=0.814074
+2024-02-21 02:11:36,947 P2661590 INFO ************ Epoch=12 end ************
+2024-02-21 02:16:33,347 P2661590 INFO Train loss: 0.435572
+2024-02-21 02:16:33,347 P2661590 INFO Evaluation @epoch 13 - batch 4477:
+2024-02-21 02:16:47,262 P2661590 INFO [Metrics] AUC: 0.814462
+2024-02-21 02:16:47,263 P2661590 INFO Save best model: monitor(max)=0.814462
+2024-02-21 02:16:47,448 P2661590 INFO ************ Epoch=13 end ************
+2024-02-21 02:21:42,706 P2661590 INFO Train loss: 0.433620
+2024-02-21 02:21:42,707 P2661590 INFO Evaluation @epoch 14 - batch 4477:
+2024-02-21 02:21:56,657 P2661590 INFO [Metrics] AUC: 0.814439
+2024-02-21 02:21:56,662 P2661590 INFO Monitor(max)=0.814439 STOP!
+2024-02-21 02:21:56,662 P2661590 INFO Reduce learning rate on plateau: 0.000010
+2024-02-21 02:21:56,737 P2661590 INFO ************ Epoch=14 end ************
+2024-02-21 02:26:51,667 P2661590 INFO Train loss: 0.430005
+2024-02-21 02:26:51,668 P2661590 INFO Evaluation @epoch 15 - batch 4477:
+2024-02-21 02:27:05,795 P2661590 INFO [Metrics] AUC: 0.814147
+2024-02-21 02:27:05,796 P2661590 INFO Monitor(max)=0.814147 STOP!
+2024-02-21 02:27:05,796 P2661590 INFO Reduce learning rate on plateau: 0.000001
+2024-02-21 02:27:05,796 P2661590 INFO ********* Epoch==15 early stop *********
+2024-02-21 02:27:05,869 P2661590 INFO Training finished.
+2024-02-21 02:27:05,869 P2661590 INFO Load best model: /cache/FuxiCTR/benchmark/checkpoints/criteo_x4_001_a5e05ce7/FinalNet_criteo_x4_001_041_449ccb21.model
+2024-02-21 02:27:05,947 P2661590 INFO ****** Validation evaluation ******
+2024-02-21 02:27:21,204 P2661590 INFO [Metrics] AUC: 0.814462 - logloss: 0.437531
+2024-02-21 02:27:21,319 P2661590 INFO ******** Test evaluation ********
+2024-02-21 02:27:21,320 P2661590 INFO Loading datasets...
+2024-02-21 02:27:25,802 P2661590 INFO Test samples: total/4584062, blocks/1
+2024-02-21 02:27:25,802 P2661590 INFO Loading test data done.
+2024-02-21 02:27:41,261 P2661590 INFO [Metrics] AUC: 0.814966 - logloss: 0.437116
+
+```
diff --git a/ranking/ctr/FinalNet/FinalNet_criteo_x4_001/environments.txt b/ranking/ctr/FinalNet/FinalNet_criteo_x4_001/environments.txt
new file mode 100644
index 00000000..b4567ace
--- /dev/null
+++ b/ranking/ctr/FinalNet/FinalNet_criteo_x4_001/environments.txt
@@ -0,0 +1,18 @@
+[Hardware]
+CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
+GPU: Tesla V100 32G
+RAM: 755G
+
+[Software]
+CUDA: 10.2
+python: 3.7.10
+pytorch: 1.10.2+cu102
+pandas: 1.1.5
+numpy: 1.19.5
+scipy: 1.5.2
+sklearn: 0.22.1
+pyyaml: 6.0.1
+h5py: 2.8.0
+tqdm: 4.64.0
+keras_preprocessing: 1.1.2
+fuxictr: 2.2.0
\ No newline at end of file
diff --git a/ranking/ctr/FinalNet/FinalNet_criteo_x4_001/results.csv b/ranking/ctr/FinalNet/FinalNet_criteo_x4_001/results.csv
new file mode 100644
index 00000000..ccfc37e4
--- /dev/null
+++ b/ranking/ctr/FinalNet/FinalNet_criteo_x4_001/results.csv
@@ -0,0 +1 @@
+ 20240221-022741,[command] python run_expid.py --config Criteo_x4/FinalNet_criteo_x4_001/FinalNet_criteo_x4_tuner_config_05 --expid FinalNet_criteo_x4_001_041_449ccb21 --gpu 0,[exp_id] FinalNet_criteo_x4_001_041_449ccb21,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.814462 - logloss: 0.437531,[test] AUC: 0.814966 - logloss: 0.437116
diff --git a/ranking/ctr/FinalNet/FinalNet_frappe_x1/FinalNet_1B_frappe_x1/README.md b/ranking/ctr/FinalNet/FinalNet_frappe_x1/FinalNet_1B_frappe_x1/README.md
index 638cf881..ccb4024c 100644
--- a/ranking/ctr/FinalNet/FinalNet_frappe_x1/FinalNet_1B_frappe_x1/README.md
+++ b/ranking/ctr/FinalNet/FinalNet_frappe_x1/FinalNet_1B_frappe_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FINAL model on the Frappe_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -38,11 +38,11 @@ Please refer to the BARS dataset [Frappe_x1](https://github.com/openbenchmark/BA
### Code
-We use the [FINAL](https://github.com/xue-pai/FuxiCTR/blob/v2.0.2/model_zoo/FINAL) model code from [FuxiCTR-v2.0.2](https://github.com/xue-pai/FuxiCTR/tree/v2.0.2) for this experiment.
+We use the [FINAL](https://github.com/reczoo/FuxiCTR/blob/v2.0.2/model_zoo/FINAL) model code from [FuxiCTR-v2.0.2](https://github.com/reczoo/FuxiCTR/tree/v2.0.2) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.2.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.2.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/FinalNet/FinalNet_frappe_x1/FinalNet_2B_frappe_x1/README.md b/ranking/ctr/FinalNet/FinalNet_frappe_x1/FinalNet_2B_frappe_x1/README.md
index 25fa0016..7c964a32 100644
--- a/ranking/ctr/FinalNet/FinalNet_frappe_x1/FinalNet_2B_frappe_x1/README.md
+++ b/ranking/ctr/FinalNet/FinalNet_frappe_x1/FinalNet_2B_frappe_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FINAL model on the Frappe_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -38,11 +38,11 @@ Please refer to the BARS dataset [Frappe_x1](https://github.com/openbenchmark/BA
### Code
-We use the [FINAL](https://github.com/xue-pai/FuxiCTR/blob/v2.0.2/model_zoo/FINAL) model code from [FuxiCTR-v2.0.2](https://github.com/xue-pai/FuxiCTR/tree/v2.0.2) for this experiment.
+We use the [FINAL](https://github.com/reczoo/FuxiCTR/blob/v2.0.2/model_zoo/FINAL) model code from [FuxiCTR-v2.0.2](https://github.com/reczoo/FuxiCTR/tree/v2.0.2) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.2.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.2.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/FinalNet/FinalNet_movielenslatest_x1/FinalNet_1B_movielenslatest_x1/README.md b/ranking/ctr/FinalNet/FinalNet_movielenslatest_x1/FinalNet_1B_movielenslatest_x1/README.md
index 925d0425..a6fb2a18 100644
--- a/ranking/ctr/FinalNet/FinalNet_movielenslatest_x1/FinalNet_1B_movielenslatest_x1/README.md
+++ b/ranking/ctr/FinalNet/FinalNet_movielenslatest_x1/FinalNet_1B_movielenslatest_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FINAL model on the MovielensLatest_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -38,11 +38,11 @@ Please refer to the BARS dataset [MovielensLatest_x1](https://github.com/openben
### Code
-We use the [FINAL](https://github.com/xue-pai/FuxiCTR/blob/v2.0.2/model_zoo/FINAL) model code from [FuxiCTR-v2.0.2](https://github.com/xue-pai/FuxiCTR/tree/v2.0.2) for this experiment.
+We use the [FINAL](https://github.com/reczoo/FuxiCTR/blob/v2.0.2/model_zoo/FINAL) model code from [FuxiCTR-v2.0.2](https://github.com/reczoo/FuxiCTR/tree/v2.0.2) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.2.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.2.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/FinalNet/FinalNet_movielenslatest_x1/FinalNet_2B_movielenslatest_x1/README.md b/ranking/ctr/FinalNet/FinalNet_movielenslatest_x1/FinalNet_2B_movielenslatest_x1/README.md
index 95e6089a..16c27d6f 100644
--- a/ranking/ctr/FinalNet/FinalNet_movielenslatest_x1/FinalNet_2B_movielenslatest_x1/README.md
+++ b/ranking/ctr/FinalNet/FinalNet_movielenslatest_x1/FinalNet_2B_movielenslatest_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FINAL model on the MovielensLatest_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -38,11 +38,11 @@ Please refer to the BARS dataset [MovielensLatest_x1](https://github.com/openben
### Code
-We use the [FINAL](https://github.com/xue-pai/FuxiCTR/blob/v2.0.2/model_zoo/FINAL) model code from [FuxiCTR-v2.0.2](https://github.com/xue-pai/FuxiCTR/tree/v2.0.2) for this experiment.
+We use the [FINAL](https://github.com/reczoo/FuxiCTR/blob/v2.0.2/model_zoo/FINAL) model code from [FuxiCTR-v2.0.2](https://github.com/reczoo/FuxiCTR/tree/v2.0.2) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.2.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.2.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/FmFM/FmFM_amazonelectronics_x1/README.md b/ranking/ctr/FmFM/FmFM_amazonelectronics_x1/README.md
index e267aada..5f07f6b1 100644
--- a/ranking/ctr/FmFM/FmFM_amazonelectronics_x1/README.md
+++ b/ranking/ctr/FmFM/FmFM_amazonelectronics_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FmFM model on the AmazonElectronics_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [AmazonElectronics_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/Amazon#AmazonElectronics_x1) to get data ready.
+Please refer to [AmazonElectronics_x1](https://github.com/reczoo/Datasets/tree/main/Amazon/AmazonElectronics_x1) to get the dataset details.
### Code
-We use the [FmFM](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/FmFM) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [FmFM](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/FmFM) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/FmFM/FmFM_avazu_x1/README.md b/ranking/ctr/FmFM/FmFM_avazu_x1/README.md
index c5292b9c..bae0bfa6 100644
--- a/ranking/ctr/FmFM/FmFM_avazu_x1/README.md
+++ b/ranking/ctr/FmFM/FmFM_avazu_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FmFM model on the Avazu_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x1](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FmFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FmFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FmFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FmFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FmFM/FmFM_criteo_x1/README.md b/ranking/ctr/FmFM/FmFM_criteo_x1/README.md
index 7b8e745f..87c9dc8c 100644
--- a/ranking/ctr/FmFM/FmFM_criteo_x1/README.md
+++ b/ranking/ctr/FmFM/FmFM_criteo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FmFM model on the Criteo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x1](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FmFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FmFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FmFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FmFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FmFM/FmFM_frappe_x1/README.md b/ranking/ctr/FmFM/FmFM_frappe_x1/README.md
index 26163405..ef6064c3 100644
--- a/ranking/ctr/FmFM/FmFM_frappe_x1/README.md
+++ b/ranking/ctr/FmFM/FmFM_frappe_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FmFM model on the Frappe_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Frappe_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Frappe/README.md#Frappe_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Frappe_x1](https://github.com/reczoo/Datasets/tree/main/Frappe/Frappe_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FmFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FmFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FmFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FmFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FmFM/FmFM_kuaivideo_x1/README.md b/ranking/ctr/FmFM/FmFM_kuaivideo_x1/README.md
index 4faf2139..155ed95d 100644
--- a/ranking/ctr/FmFM/FmFM_kuaivideo_x1/README.md
+++ b/ranking/ctr/FmFM/FmFM_kuaivideo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FmFM model on the KuaiVideo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [KuaiVideo_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/KuaiShou#KuaiVideo_x1) to get data ready.
+Please refer to [KuaiVideo_x1](https://github.com/reczoo/Datasets/tree/main/KuaiShou/KuaiVideo_x1) to get the dataset details.
### Code
-We use the [FmFM](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/FmFM) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [FmFM](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/FmFM) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/FmFM/FmFM_microvideo1.7m_x1/README.md b/ranking/ctr/FmFM/FmFM_microvideo1.7m_x1/README.md
index 93882d12..6eb2fc10 100644
--- a/ranking/ctr/FmFM/FmFM_microvideo1.7m_x1/README.md
+++ b/ranking/ctr/FmFM/FmFM_microvideo1.7m_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FmFM model on the MicroVideo1.7M_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [MicroVideo1.7M_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/MicroVideo1.7M#MicroVideo17M_x1) to get data ready.
+Please refer to [MicroVideo1.7M_x1](https://github.com/reczoo/Datasets/tree/main/MicroVideo/MicroVideo1.7M_x1) to get the dataset details.
### Code
-We use the [FmFM](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/FmFM) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [FmFM](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/FmFM) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/FmFM/FmFM_movielenslatest_x1/README.md b/ranking/ctr/FmFM/FmFM_movielenslatest_x1/README.md
index a4130a79..777afff0 100644
--- a/ranking/ctr/FmFM/FmFM_movielenslatest_x1/README.md
+++ b/ranking/ctr/FmFM/FmFM_movielenslatest_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FmFM model on the Movielenslatest_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Movielenslatest_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/MovieLens/README.md#Movielenslatest_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [MovielensLatest_x1](https://github.com/reczoo/Datasets/tree/main/MovieLens/MovielensLatest_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FmFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FmFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FmFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FmFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FmFM/FmFM_taobaoad_x1/README.md b/ranking/ctr/FmFM/FmFM_taobaoad_x1/README.md
index 0f2cbdc6..eec6de68 100644
--- a/ranking/ctr/FmFM/FmFM_taobaoad_x1/README.md
+++ b/ranking/ctr/FmFM/FmFM_taobaoad_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FmFM model on the TaobaoAd_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [TaobaoAd_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/Taobao#TaobaoAd_x1) to get data ready.
+Please refer to [TaobaoAd_x1](https://github.com/reczoo/Datasets/tree/main/Taobao/TaobaoAd_x1) to get the dataset details.
### Code
-We use the [FmFM](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/FmFM) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [FmFM](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/FmFM) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/FwFM/FwFM_avazu_x1/README.md b/ranking/ctr/FwFM/FwFM_avazu_x1/README.md
index 39c06ad4..41648b00 100644
--- a/ranking/ctr/FwFM/FwFM_avazu_x1/README.md
+++ b/ranking/ctr/FwFM/FwFM_avazu_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FwFM model on the Avazu_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x1](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FwFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FwFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FwFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FwFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FwFM/FwFM_avazu_x4_001/README.md b/ranking/ctr/FwFM/FwFM_avazu_x4_001/README.md
index e43adfb2..6362bac1 100644
--- a/ranking/ctr/FwFM/FwFM_avazu_x4_001/README.md
+++ b/ranking/ctr/FwFM/FwFM_avazu_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FwFM model on the Avazu_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FwFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FwFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FwFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FwFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FwFM/FwFM_avazu_x4_002/README.md b/ranking/ctr/FwFM/FwFM_avazu_x4_002/README.md
index fdd6efbe..f477f1c4 100644
--- a/ranking/ctr/FwFM/FwFM_avazu_x4_002/README.md
+++ b/ranking/ctr/FwFM/FwFM_avazu_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FwFM model on the Avazu_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FwFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FwFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FwFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FwFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FwFM/FwFM_criteo_x1/README.md b/ranking/ctr/FwFM/FwFM_criteo_x1/README.md
index 6fe86202..83a90c43 100644
--- a/ranking/ctr/FwFM/FwFM_criteo_x1/README.md
+++ b/ranking/ctr/FwFM/FwFM_criteo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FwFM model on the Criteo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x1](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FwFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FwFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FwFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FwFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FwFM/FwFM_criteo_x4_001/README.md b/ranking/ctr/FwFM/FwFM_criteo_x4_001/README.md
index b4e28a73..40f8977b 100644
--- a/ranking/ctr/FwFM/FwFM_criteo_x4_001/README.md
+++ b/ranking/ctr/FwFM/FwFM_criteo_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FwFM model on the Criteo_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FwFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FwFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FwFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FwFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FwFM/FwFM_criteo_x4_002/README.md b/ranking/ctr/FwFM/FwFM_criteo_x4_002/README.md
index ce63cdaf..aebea777 100644
--- a/ranking/ctr/FwFM/FwFM_criteo_x4_002/README.md
+++ b/ranking/ctr/FwFM/FwFM_criteo_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FwFM model on the Criteo_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FwFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FwFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FwFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FwFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FwFM/FwFM_frappe_x1/README.md b/ranking/ctr/FwFM/FwFM_frappe_x1/README.md
index b2178f0c..a3f8646e 100644
--- a/ranking/ctr/FwFM/FwFM_frappe_x1/README.md
+++ b/ranking/ctr/FwFM/FwFM_frappe_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FwFM model on the Frappe_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Frappe_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Frappe/README.md#Frappe_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Frappe_x1](https://github.com/reczoo/Datasets/tree/main/Frappe/Frappe_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FwFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FwFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FwFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FwFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FwFM/FwFM_kkbox_x1/README.md b/ranking/ctr/FwFM/FwFM_kkbox_x1/README.md
index cf02868a..e4810176 100644
--- a/ranking/ctr/FwFM/FwFM_kkbox_x1/README.md
+++ b/ranking/ctr/FwFM/FwFM_kkbox_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the FwFM model on the KKBox_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -38,11 +38,11 @@ Dataset ID: [KKBox_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pre
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FwFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FwFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [FwFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/FwFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/FwFM/FwFM_movielenslatest_x1/README.md b/ranking/ctr/FwFM/FwFM_movielenslatest_x1/README.md
index 83aa86fd..651e96ac 100644
--- a/ranking/ctr/FwFM/FwFM_movielenslatest_x1/README.md
+++ b/ranking/ctr/FwFM/FwFM_movielenslatest_x1/README.md
@@ -1,205 +1,205 @@
-## FwFM_movielenslatest_x1
-
-A hands-on guide to run the FwFM model on the MovielensLatest_x1 dataset.
-
-Author: [XUEPAI](https://github.com/xue-pai)
-
-### Index
-[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
-
-### Environments
-+ Hardware
-
- ```python
- CPU: Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.6GHz
- GPU: Tesla P100 16G
- RAM: 755G
-
- ```
-
-+ Software
-
- ```python
- CUDA: 11.4
- python: 3.6.5
- pytorch: 1.0.1.post2
- pandas: 0.23.0
- numpy: 1.18.1
- scipy: 1.1.0
- sklearn: 0.23.1
- pyyaml: 5.1
- h5py: 2.7.1
- tqdm: 4.59.0
- fuxictr: 1.1.0
- ```
-
-### Dataset
-Dataset ID: [MovielensLatest_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/MovieLens#MovielensLatest_x1). Please refer to the dataset details to get data ready.
-
-### Code
-
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FwFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FwFM.py).
-
-Running steps:
-
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
-
- ```python
- sys.path.append('YOUR_PATH_TO_FuxiCTR/')
- ```
-
-2. Create a data directory and put the downloaded csv files in `../data/Movielens/MovielensLatest_x1`.
-
-3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [FwFM_movielenslatest_x1_tuner_config_02](./FwFM_movielenslatest_x1_tuner_config_02). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
-
-4. Run the following script to start.
-
- ```bash
- cd FwFM_movielenslatest_x1
- nohup python run_expid.py --config ./FwFM_movielenslatest_x1_tuner_config_02 --expid FwFM_movielenslatest_x1_006_e527bbd6 --gpu 0 > run.log &
- tail -f run.log
- ```
-
-### Results
-
-| AUC | logloss |
-|:--------------------:|:--------------------:|
-| 0.955799 | 0.242620 |
-
-
-### Logs
-```python
-2022-01-25 13:54:27,525 P43856 INFO {
- "batch_size": "4096",
- "data_format": "csv",
- "data_root": "../data/Movielens/",
- "dataset_id": "movielenslatest_x1_cd32d937",
- "debug": "False",
- "embedding_dim": "10",
- "epochs": "100",
- "every_x_epochs": "1",
- "feature_cols": "[{'active': True, 'dtype': 'float', 'name': ['user_id', 'item_id', 'tag_id'], 'type': 'categorical'}]",
- "gpu": "1",
- "label_col": "{'dtype': 'float', 'name': 'label'}",
- "learning_rate": "0.001",
- "linear_type": "FeLV",
- "loss": "binary_crossentropy",
- "metrics": "['AUC', 'logloss']",
- "min_categr_count": "1",
- "model": "FwFM",
- "model_id": "FwFM_movielenslatest_x1_006_e527bbd6",
- "model_root": "./Movielens/FwFM_movielenslatest_x1/",
- "monitor": "AUC",
- "monitor_mode": "max",
- "num_workers": "3",
- "optimizer": "adam",
- "partition_block_size": "-1",
- "patience": "2",
- "pickle_feature_encoder": "True",
- "regularizer": "5e-06",
- "save_best_only": "True",
- "seed": "2021",
- "shuffle": "True",
- "task": "binary_classification",
- "test_data": "../data/Movielens/MovielensLatest_x1/test.csv",
- "train_data": "../data/Movielens/MovielensLatest_x1/train.csv",
- "use_hdf5": "True",
- "valid_data": "../data/Movielens/MovielensLatest_x1/valid.csv",
- "verbose": "1",
- "version": "pytorch"
-}
-2022-01-25 13:54:27,526 P43856 INFO Set up feature encoder...
-2022-01-25 13:54:27,526 P43856 INFO Load feature_map from json: ../data/Movielens/movielenslatest_x1_cd32d937/feature_map.json
-2022-01-25 13:54:27,526 P43856 INFO Loading data...
-2022-01-25 13:54:27,529 P43856 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_cd32d937/train.h5
-2022-01-25 13:54:28,248 P43856 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_cd32d937/valid.h5
-2022-01-25 13:54:28,259 P43856 INFO Train samples: total/1404801, pos/467878, neg/936923, ratio/33.31%, blocks/1
-2022-01-25 13:54:28,259 P43856 INFO Validation samples: total/401372, pos/134225, neg/267147, ratio/33.44%, blocks/1
-2022-01-25 13:54:28,259 P43856 INFO Loading train data done.
-2022-01-25 13:55:10,424 P43856 INFO Total number of parameters: 1804784.
-2022-01-25 13:55:10,424 P43856 INFO Start training: 343 batches/epoch
-2022-01-25 13:55:10,424 P43856 INFO ************ Epoch=1 start ************
-2022-01-25 13:55:26,643 P43856 INFO [Metrics] AUC: 0.857053 - logloss: 0.482405
-2022-01-25 13:55:26,643 P43856 INFO Save best model: monitor(max): 0.857053
-2022-01-25 13:55:26,655 P43856 INFO --- 343/343 batches finished ---
-2022-01-25 13:55:26,722 P43856 INFO Train loss: 0.605152
-2022-01-25 13:55:26,722 P43856 INFO ************ Epoch=1 end ************
-2022-01-25 13:55:39,308 P43856 INFO [Metrics] AUC: 0.925707 - logloss: 0.334313
-2022-01-25 13:55:39,308 P43856 INFO Save best model: monitor(max): 0.925707
-2022-01-25 13:55:39,318 P43856 INFO --- 343/343 batches finished ---
-2022-01-25 13:55:39,380 P43856 INFO Train loss: 0.405395
-2022-01-25 13:55:39,380 P43856 INFO ************ Epoch=2 end ************
-2022-01-25 13:55:51,880 P43856 INFO [Metrics] AUC: 0.942385 - logloss: 0.280649
-2022-01-25 13:55:51,880 P43856 INFO Save best model: monitor(max): 0.942385
-2022-01-25 13:55:51,890 P43856 INFO --- 343/343 batches finished ---
-2022-01-25 13:55:51,963 P43856 INFO Train loss: 0.314460
-2022-01-25 13:55:51,964 P43856 INFO ************ Epoch=3 end ************
-2022-01-25 13:56:04,084 P43856 INFO [Metrics] AUC: 0.948237 - logloss: 0.259972
-2022-01-25 13:56:04,085 P43856 INFO Save best model: monitor(max): 0.948237
-2022-01-25 13:56:04,098 P43856 INFO --- 343/343 batches finished ---
-2022-01-25 13:56:04,169 P43856 INFO Train loss: 0.276651
-2022-01-25 13:56:04,169 P43856 INFO ************ Epoch=4 end ************
-2022-01-25 13:56:16,649 P43856 INFO [Metrics] AUC: 0.951234 - logloss: 0.249929
-2022-01-25 13:56:16,650 P43856 INFO Save best model: monitor(max): 0.951234
-2022-01-25 13:56:16,664 P43856 INFO --- 343/343 batches finished ---
-2022-01-25 13:56:16,720 P43856 INFO Train loss: 0.254924
-2022-01-25 13:56:16,720 P43856 INFO ************ Epoch=5 end ************
-2022-01-25 13:56:29,800 P43856 INFO [Metrics] AUC: 0.953068 - logloss: 0.244246
-2022-01-25 13:56:29,801 P43856 INFO Save best model: monitor(max): 0.953068
-2022-01-25 13:56:29,816 P43856 INFO --- 343/343 batches finished ---
-2022-01-25 13:56:29,889 P43856 INFO Train loss: 0.238786
-2022-01-25 13:56:29,890 P43856 INFO ************ Epoch=6 end ************
-2022-01-25 13:56:41,898 P43856 INFO [Metrics] AUC: 0.954283 - logloss: 0.240922
-2022-01-25 13:56:41,899 P43856 INFO Save best model: monitor(max): 0.954283
-2022-01-25 13:56:41,911 P43856 INFO --- 343/343 batches finished ---
-2022-01-25 13:56:41,972 P43856 INFO Train loss: 0.225328
-2022-01-25 13:56:41,973 P43856 INFO ************ Epoch=7 end ************
-2022-01-25 13:56:56,363 P43856 INFO [Metrics] AUC: 0.955018 - logloss: 0.239211
-2022-01-25 13:56:56,364 P43856 INFO Save best model: monitor(max): 0.955018
-2022-01-25 13:56:56,374 P43856 INFO --- 343/343 batches finished ---
-2022-01-25 13:56:56,437 P43856 INFO Train loss: 0.213537
-2022-01-25 13:56:56,437 P43856 INFO ************ Epoch=8 end ************
-2022-01-25 13:57:07,516 P43856 INFO [Metrics] AUC: 0.955468 - logloss: 0.238645
-2022-01-25 13:57:07,516 P43856 INFO Save best model: monitor(max): 0.955468
-2022-01-25 13:57:07,527 P43856 INFO --- 343/343 batches finished ---
-2022-01-25 13:57:07,589 P43856 INFO Train loss: 0.202920
-2022-01-25 13:57:07,590 P43856 INFO ************ Epoch=9 end ************
-2022-01-25 13:57:19,900 P43856 INFO [Metrics] AUC: 0.955801 - logloss: 0.238970
-2022-01-25 13:57:19,901 P43856 INFO Save best model: monitor(max): 0.955801
-2022-01-25 13:57:19,913 P43856 INFO --- 343/343 batches finished ---
-2022-01-25 13:57:19,964 P43856 INFO Train loss: 0.193248
-2022-01-25 13:57:19,964 P43856 INFO ************ Epoch=10 end ************
-2022-01-25 13:57:32,037 P43856 INFO [Metrics] AUC: 0.955880 - logloss: 0.240202
-2022-01-25 13:57:32,037 P43856 INFO Save best model: monitor(max): 0.955880
-2022-01-25 13:57:32,049 P43856 INFO --- 343/343 batches finished ---
-2022-01-25 13:57:32,116 P43856 INFO Train loss: 0.184399
-2022-01-25 13:57:32,116 P43856 INFO ************ Epoch=11 end ************
-2022-01-25 13:57:40,104 P43856 INFO [Metrics] AUC: 0.955890 - logloss: 0.242119
-2022-01-25 13:57:40,105 P43856 INFO Save best model: monitor(max): 0.955890
-2022-01-25 13:57:40,117 P43856 INFO --- 343/343 batches finished ---
-2022-01-25 13:57:40,179 P43856 INFO Train loss: 0.176247
-2022-01-25 13:57:40,179 P43856 INFO ************ Epoch=12 end ************
-2022-01-25 13:57:49,829 P43856 INFO [Metrics] AUC: 0.955803 - logloss: 0.244583
-2022-01-25 13:57:49,830 P43856 INFO Monitor(max) STOP: 0.955803 !
-2022-01-25 13:57:49,830 P43856 INFO Reduce learning rate on plateau: 0.000100
-2022-01-25 13:57:49,830 P43856 INFO --- 343/343 batches finished ---
-2022-01-25 13:57:49,905 P43856 INFO Train loss: 0.168766
-2022-01-25 13:57:49,905 P43856 INFO ************ Epoch=13 end ************
-2022-01-25 13:57:56,438 P43856 INFO [Metrics] AUC: 0.955720 - logloss: 0.245005
-2022-01-25 13:57:56,439 P43856 INFO Monitor(max) STOP: 0.955720 !
-2022-01-25 13:57:56,439 P43856 INFO Reduce learning rate on plateau: 0.000010
-2022-01-25 13:57:56,439 P43856 INFO Early stopping at epoch=14
-2022-01-25 13:57:56,439 P43856 INFO --- 343/343 batches finished ---
-2022-01-25 13:57:56,492 P43856 INFO Train loss: 0.157290
-2022-01-25 13:57:56,492 P43856 INFO Training finished.
-2022-01-25 13:57:56,881 P43856 INFO Load best model: /home/XXX/benchmarks/Movielens/FwFM_movielenslatest_x1/movielenslatest_x1_cd32d937/FwFM_movielenslatest_x1_006_e527bbd6.model
-2022-01-25 13:58:22,122 P43856 INFO ****** Validation evaluation ******
-2022-01-25 13:58:25,329 P43856 INFO [Metrics] AUC: 0.955890 - logloss: 0.242119
-2022-01-25 13:58:25,399 P43856 INFO ******** Test evaluation ********
-2022-01-25 13:58:25,399 P43856 INFO Loading data...
-2022-01-25 13:58:25,399 P43856 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_cd32d937/test.h5
-2022-01-25 13:58:25,404 P43856 INFO Test samples: total/200686, pos/66850, neg/133836, ratio/33.31%, blocks/1
-2022-01-25 13:58:25,404 P43856 INFO Loading test data done.
-2022-01-25 13:58:27,832 P43856 INFO [Metrics] AUC: 0.955799 - logloss: 0.242620
-
-```
+## FwFM_movielenslatest_x1
+
+A hands-on guide to run the FwFM model on the MovielensLatest_x1 dataset.
+
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
+
+### Index
+[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
+
+### Environments
++ Hardware
+
+ ```python
+ CPU: Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.6GHz
+ GPU: Tesla P100 16G
+ RAM: 755G
+
+ ```
+
++ Software
+
+ ```python
+ CUDA: 11.4
+ python: 3.6.5
+ pytorch: 1.0.1.post2
+ pandas: 0.23.0
+ numpy: 1.18.1
+ scipy: 1.1.0
+ sklearn: 0.23.1
+ pyyaml: 5.1
+ h5py: 2.7.1
+ tqdm: 4.59.0
+ fuxictr: 1.1.0
+ ```
+
+### Dataset
+Dataset ID: [MovielensLatest_x1](https://github.com/reczoo/Datasets/tree/main/MovieLens/MovielensLatest_x1). Please refer to the dataset details to get data ready.
+
+### Code
+
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [FwFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/FwFM.py).
+
+Running steps:
+
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+
+ ```python
+ sys.path.append('YOUR_PATH_TO_FuxiCTR/')
+ ```
+
+2. Create a data directory and put the downloaded csv files in `../data/Movielens/MovielensLatest_x1`.
+
+3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [FwFM_movielenslatest_x1_tuner_config_02](./FwFM_movielenslatest_x1_tuner_config_02). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
+
+4. Run the following script to start.
+
+ ```bash
+ cd FwFM_movielenslatest_x1
+ nohup python run_expid.py --config ./FwFM_movielenslatest_x1_tuner_config_02 --expid FwFM_movielenslatest_x1_006_e527bbd6 --gpu 0 > run.log &
+ tail -f run.log
+ ```
+
+### Results
+
+| AUC | logloss |
+|:--------------------:|:--------------------:|
+| 0.955799 | 0.242620 |
+
+
+### Logs
+```python
+2022-01-25 13:54:27,525 P43856 INFO {
+ "batch_size": "4096",
+ "data_format": "csv",
+ "data_root": "../data/Movielens/",
+ "dataset_id": "movielenslatest_x1_cd32d937",
+ "debug": "False",
+ "embedding_dim": "10",
+ "epochs": "100",
+ "every_x_epochs": "1",
+ "feature_cols": "[{'active': True, 'dtype': 'float', 'name': ['user_id', 'item_id', 'tag_id'], 'type': 'categorical'}]",
+ "gpu": "1",
+ "label_col": "{'dtype': 'float', 'name': 'label'}",
+ "learning_rate": "0.001",
+ "linear_type": "FeLV",
+ "loss": "binary_crossentropy",
+ "metrics": "['AUC', 'logloss']",
+ "min_categr_count": "1",
+ "model": "FwFM",
+ "model_id": "FwFM_movielenslatest_x1_006_e527bbd6",
+ "model_root": "./Movielens/FwFM_movielenslatest_x1/",
+ "monitor": "AUC",
+ "monitor_mode": "max",
+ "num_workers": "3",
+ "optimizer": "adam",
+ "partition_block_size": "-1",
+ "patience": "2",
+ "pickle_feature_encoder": "True",
+ "regularizer": "5e-06",
+ "save_best_only": "True",
+ "seed": "2021",
+ "shuffle": "True",
+ "task": "binary_classification",
+ "test_data": "../data/Movielens/MovielensLatest_x1/test.csv",
+ "train_data": "../data/Movielens/MovielensLatest_x1/train.csv",
+ "use_hdf5": "True",
+ "valid_data": "../data/Movielens/MovielensLatest_x1/valid.csv",
+ "verbose": "1",
+ "version": "pytorch"
+}
+2022-01-25 13:54:27,526 P43856 INFO Set up feature encoder...
+2022-01-25 13:54:27,526 P43856 INFO Load feature_map from json: ../data/Movielens/movielenslatest_x1_cd32d937/feature_map.json
+2022-01-25 13:54:27,526 P43856 INFO Loading data...
+2022-01-25 13:54:27,529 P43856 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_cd32d937/train.h5
+2022-01-25 13:54:28,248 P43856 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_cd32d937/valid.h5
+2022-01-25 13:54:28,259 P43856 INFO Train samples: total/1404801, pos/467878, neg/936923, ratio/33.31%, blocks/1
+2022-01-25 13:54:28,259 P43856 INFO Validation samples: total/401372, pos/134225, neg/267147, ratio/33.44%, blocks/1
+2022-01-25 13:54:28,259 P43856 INFO Loading train data done.
+2022-01-25 13:55:10,424 P43856 INFO Total number of parameters: 1804784.
+2022-01-25 13:55:10,424 P43856 INFO Start training: 343 batches/epoch
+2022-01-25 13:55:10,424 P43856 INFO ************ Epoch=1 start ************
+2022-01-25 13:55:26,643 P43856 INFO [Metrics] AUC: 0.857053 - logloss: 0.482405
+2022-01-25 13:55:26,643 P43856 INFO Save best model: monitor(max): 0.857053
+2022-01-25 13:55:26,655 P43856 INFO --- 343/343 batches finished ---
+2022-01-25 13:55:26,722 P43856 INFO Train loss: 0.605152
+2022-01-25 13:55:26,722 P43856 INFO ************ Epoch=1 end ************
+2022-01-25 13:55:39,308 P43856 INFO [Metrics] AUC: 0.925707 - logloss: 0.334313
+2022-01-25 13:55:39,308 P43856 INFO Save best model: monitor(max): 0.925707
+2022-01-25 13:55:39,318 P43856 INFO --- 343/343 batches finished ---
+2022-01-25 13:55:39,380 P43856 INFO Train loss: 0.405395
+2022-01-25 13:55:39,380 P43856 INFO ************ Epoch=2 end ************
+2022-01-25 13:55:51,880 P43856 INFO [Metrics] AUC: 0.942385 - logloss: 0.280649
+2022-01-25 13:55:51,880 P43856 INFO Save best model: monitor(max): 0.942385
+2022-01-25 13:55:51,890 P43856 INFO --- 343/343 batches finished ---
+2022-01-25 13:55:51,963 P43856 INFO Train loss: 0.314460
+2022-01-25 13:55:51,964 P43856 INFO ************ Epoch=3 end ************
+2022-01-25 13:56:04,084 P43856 INFO [Metrics] AUC: 0.948237 - logloss: 0.259972
+2022-01-25 13:56:04,085 P43856 INFO Save best model: monitor(max): 0.948237
+2022-01-25 13:56:04,098 P43856 INFO --- 343/343 batches finished ---
+2022-01-25 13:56:04,169 P43856 INFO Train loss: 0.276651
+2022-01-25 13:56:04,169 P43856 INFO ************ Epoch=4 end ************
+2022-01-25 13:56:16,649 P43856 INFO [Metrics] AUC: 0.951234 - logloss: 0.249929
+2022-01-25 13:56:16,650 P43856 INFO Save best model: monitor(max): 0.951234
+2022-01-25 13:56:16,664 P43856 INFO --- 343/343 batches finished ---
+2022-01-25 13:56:16,720 P43856 INFO Train loss: 0.254924
+2022-01-25 13:56:16,720 P43856 INFO ************ Epoch=5 end ************
+2022-01-25 13:56:29,800 P43856 INFO [Metrics] AUC: 0.953068 - logloss: 0.244246
+2022-01-25 13:56:29,801 P43856 INFO Save best model: monitor(max): 0.953068
+2022-01-25 13:56:29,816 P43856 INFO --- 343/343 batches finished ---
+2022-01-25 13:56:29,889 P43856 INFO Train loss: 0.238786
+2022-01-25 13:56:29,890 P43856 INFO ************ Epoch=6 end ************
+2022-01-25 13:56:41,898 P43856 INFO [Metrics] AUC: 0.954283 - logloss: 0.240922
+2022-01-25 13:56:41,899 P43856 INFO Save best model: monitor(max): 0.954283
+2022-01-25 13:56:41,911 P43856 INFO --- 343/343 batches finished ---
+2022-01-25 13:56:41,972 P43856 INFO Train loss: 0.225328
+2022-01-25 13:56:41,973 P43856 INFO ************ Epoch=7 end ************
+2022-01-25 13:56:56,363 P43856 INFO [Metrics] AUC: 0.955018 - logloss: 0.239211
+2022-01-25 13:56:56,364 P43856 INFO Save best model: monitor(max): 0.955018
+2022-01-25 13:56:56,374 P43856 INFO --- 343/343 batches finished ---
+2022-01-25 13:56:56,437 P43856 INFO Train loss: 0.213537
+2022-01-25 13:56:56,437 P43856 INFO ************ Epoch=8 end ************
+2022-01-25 13:57:07,516 P43856 INFO [Metrics] AUC: 0.955468 - logloss: 0.238645
+2022-01-25 13:57:07,516 P43856 INFO Save best model: monitor(max): 0.955468
+2022-01-25 13:57:07,527 P43856 INFO --- 343/343 batches finished ---
+2022-01-25 13:57:07,589 P43856 INFO Train loss: 0.202920
+2022-01-25 13:57:07,590 P43856 INFO ************ Epoch=9 end ************
+2022-01-25 13:57:19,900 P43856 INFO [Metrics] AUC: 0.955801 - logloss: 0.238970
+2022-01-25 13:57:19,901 P43856 INFO Save best model: monitor(max): 0.955801
+2022-01-25 13:57:19,913 P43856 INFO --- 343/343 batches finished ---
+2022-01-25 13:57:19,964 P43856 INFO Train loss: 0.193248
+2022-01-25 13:57:19,964 P43856 INFO ************ Epoch=10 end ************
+2022-01-25 13:57:32,037 P43856 INFO [Metrics] AUC: 0.955880 - logloss: 0.240202
+2022-01-25 13:57:32,037 P43856 INFO Save best model: monitor(max): 0.955880
+2022-01-25 13:57:32,049 P43856 INFO --- 343/343 batches finished ---
+2022-01-25 13:57:32,116 P43856 INFO Train loss: 0.184399
+2022-01-25 13:57:32,116 P43856 INFO ************ Epoch=11 end ************
+2022-01-25 13:57:40,104 P43856 INFO [Metrics] AUC: 0.955890 - logloss: 0.242119
+2022-01-25 13:57:40,105 P43856 INFO Save best model: monitor(max): 0.955890
+2022-01-25 13:57:40,117 P43856 INFO --- 343/343 batches finished ---
+2022-01-25 13:57:40,179 P43856 INFO Train loss: 0.176247
+2022-01-25 13:57:40,179 P43856 INFO ************ Epoch=12 end ************
+2022-01-25 13:57:49,829 P43856 INFO [Metrics] AUC: 0.955803 - logloss: 0.244583
+2022-01-25 13:57:49,830 P43856 INFO Monitor(max) STOP: 0.955803 !
+2022-01-25 13:57:49,830 P43856 INFO Reduce learning rate on plateau: 0.000100
+2022-01-25 13:57:49,830 P43856 INFO --- 343/343 batches finished ---
+2022-01-25 13:57:49,905 P43856 INFO Train loss: 0.168766
+2022-01-25 13:57:49,905 P43856 INFO ************ Epoch=13 end ************
+2022-01-25 13:57:56,438 P43856 INFO [Metrics] AUC: 0.955720 - logloss: 0.245005
+2022-01-25 13:57:56,439 P43856 INFO Monitor(max) STOP: 0.955720 !
+2022-01-25 13:57:56,439 P43856 INFO Reduce learning rate on plateau: 0.000010
+2022-01-25 13:57:56,439 P43856 INFO Early stopping at epoch=14
+2022-01-25 13:57:56,439 P43856 INFO --- 343/343 batches finished ---
+2022-01-25 13:57:56,492 P43856 INFO Train loss: 0.157290
+2022-01-25 13:57:56,492 P43856 INFO Training finished.
+2022-01-25 13:57:56,881 P43856 INFO Load best model: /home/XXX/benchmarks/Movielens/FwFM_movielenslatest_x1/movielenslatest_x1_cd32d937/FwFM_movielenslatest_x1_006_e527bbd6.model
+2022-01-25 13:58:22,122 P43856 INFO ****** Validation evaluation ******
+2022-01-25 13:58:25,329 P43856 INFO [Metrics] AUC: 0.955890 - logloss: 0.242119
+2022-01-25 13:58:25,399 P43856 INFO ******** Test evaluation ********
+2022-01-25 13:58:25,399 P43856 INFO Loading data...
+2022-01-25 13:58:25,399 P43856 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_cd32d937/test.h5
+2022-01-25 13:58:25,404 P43856 INFO Test samples: total/200686, pos/66850, neg/133836, ratio/33.31%, blocks/1
+2022-01-25 13:58:25,404 P43856 INFO Loading test data done.
+2022-01-25 13:58:27,832 P43856 INFO [Metrics] AUC: 0.955799 - logloss: 0.242620
+
+```
diff --git a/ranking/ctr/HFM/HFM+_avazu_x1/README.md b/ranking/ctr/HFM/HFM+_avazu_x1/README.md
index ac8a6f0a..4a096e8e 100644
--- a/ranking/ctr/HFM/HFM+_avazu_x1/README.md
+++ b/ranking/ctr/HFM/HFM+_avazu_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the HFM model on the Avazu_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x1](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [HFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/HFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [HFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/HFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/HFM/HFM+_avazu_x4_001/README.md b/ranking/ctr/HFM/HFM+_avazu_x4_001/README.md
index 92f60df8..d92161fd 100644
--- a/ranking/ctr/HFM/HFM+_avazu_x4_001/README.md
+++ b/ranking/ctr/HFM/HFM+_avazu_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the HFM model on the Avazu_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [HFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/HFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [HFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/HFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/HFM/HFM+_avazu_x4_002/README.md b/ranking/ctr/HFM/HFM+_avazu_x4_002/README.md
index 29e48dce..4e25b9cf 100644
--- a/ranking/ctr/HFM/HFM+_avazu_x4_002/README.md
+++ b/ranking/ctr/HFM/HFM+_avazu_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the HFM model on the Avazu_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [HFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/HFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [HFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/HFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/HFM/HFM+_criteo_x1/README.md b/ranking/ctr/HFM/HFM+_criteo_x1/README.md
index 02163e52..3c7503ee 100644
--- a/ranking/ctr/HFM/HFM+_criteo_x1/README.md
+++ b/ranking/ctr/HFM/HFM+_criteo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the HFM model on the Criteo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -39,11 +39,11 @@ Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pr
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [HFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/HFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [HFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/HFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/HFM/HFM+_criteo_x4_001/README.md b/ranking/ctr/HFM/HFM+_criteo_x4_001/README.md
index 87d64a83..f4e23511 100644
--- a/ranking/ctr/HFM/HFM+_criteo_x4_001/README.md
+++ b/ranking/ctr/HFM/HFM+_criteo_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the HFM model on the Criteo_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [HFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/HFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [HFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/HFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/HFM/HFM+_criteo_x4_002/README.md b/ranking/ctr/HFM/HFM+_criteo_x4_002/README.md
index a32ae1f7..77191297 100644
--- a/ranking/ctr/HFM/HFM+_criteo_x4_002/README.md
+++ b/ranking/ctr/HFM/HFM+_criteo_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the HFM model on the Criteo_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [HFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/HFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [HFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/HFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/HFM/HFM+_frappe_x1/README.md b/ranking/ctr/HFM/HFM+_frappe_x1/README.md
index d6f14f11..dc262766 100644
--- a/ranking/ctr/HFM/HFM+_frappe_x1/README.md
+++ b/ranking/ctr/HFM/HFM+_frappe_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the HFM model on the Frappe_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Frappe_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Frappe/README.md#Frappe_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Frappe_x1](https://github.com/reczoo/Datasets/tree/main/Frappe/Frappe_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [HFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/HFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [HFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/HFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/HFM/HFM+_kkbox_x1/README.md b/ranking/ctr/HFM/HFM+_kkbox_x1/README.md
index 8feac02c..a30de810 100644
--- a/ranking/ctr/HFM/HFM+_kkbox_x1/README.md
+++ b/ranking/ctr/HFM/HFM+_kkbox_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the HFM model on the KKBox_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -38,11 +38,11 @@ Dataset ID: [KKBox_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pre
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [HFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/HFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [HFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/HFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/HFM/HFM+_movielenslatest_x1/README.md b/ranking/ctr/HFM/HFM+_movielenslatest_x1/README.md
index 4baa5fda..d545c4d7 100644
--- a/ranking/ctr/HFM/HFM+_movielenslatest_x1/README.md
+++ b/ranking/ctr/HFM/HFM+_movielenslatest_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the HFM model on the Movielenslatest_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Movielenslatest_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/MovieLens/README.md#Movielenslatest_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [MovielensLatest_x1](https://github.com/reczoo/Datasets/tree/main/MovieLens/MovielensLatest_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [HFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/HFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [HFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/HFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/HFM/HFM_avazu_x1/README.md b/ranking/ctr/HFM/HFM_avazu_x1/README.md
index 61ff0d01..caab4e60 100644
--- a/ranking/ctr/HFM/HFM_avazu_x1/README.md
+++ b/ranking/ctr/HFM/HFM_avazu_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the HFM model on the Avazu_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x1](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [HFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/HFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [HFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/HFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/HFM/HFM_avazu_x4_001/README.md b/ranking/ctr/HFM/HFM_avazu_x4_001/README.md
index 6ccfdee7..63f87f25 100644
--- a/ranking/ctr/HFM/HFM_avazu_x4_001/README.md
+++ b/ranking/ctr/HFM/HFM_avazu_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the HFM model on the Avazu_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [HFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/HFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [HFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/HFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/HFM/HFM_avazu_x4_002/README.md b/ranking/ctr/HFM/HFM_avazu_x4_002/README.md
index 92852498..888dbcc1 100644
--- a/ranking/ctr/HFM/HFM_avazu_x4_002/README.md
+++ b/ranking/ctr/HFM/HFM_avazu_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the HFM model on the Avazu_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [HFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/HFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [HFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/HFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/HFM/HFM_criteo_x1/README.md b/ranking/ctr/HFM/HFM_criteo_x1/README.md
index 2f32aab9..46d6be88 100644
--- a/ranking/ctr/HFM/HFM_criteo_x1/README.md
+++ b/ranking/ctr/HFM/HFM_criteo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the HFM model on the Criteo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x1](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [HFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/HFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [HFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/HFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/HFM/HFM_criteo_x4_001/README.md b/ranking/ctr/HFM/HFM_criteo_x4_001/README.md
index 98b72bed..6b29a192 100644
--- a/ranking/ctr/HFM/HFM_criteo_x4_001/README.md
+++ b/ranking/ctr/HFM/HFM_criteo_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the HFM model on the Criteo_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [HFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/HFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [HFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/HFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/HFM/HFM_criteo_x4_002/README.md b/ranking/ctr/HFM/HFM_criteo_x4_002/README.md
index ba225bac..4f66f2c0 100644
--- a/ranking/ctr/HFM/HFM_criteo_x4_002/README.md
+++ b/ranking/ctr/HFM/HFM_criteo_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the HFM model on the Criteo_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [HFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/HFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [HFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/HFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/HFM/HFM_kkbox_x1/README.md b/ranking/ctr/HFM/HFM_kkbox_x1/README.md
index 278419f2..1cb4c7d3 100644
--- a/ranking/ctr/HFM/HFM_kkbox_x1/README.md
+++ b/ranking/ctr/HFM/HFM_kkbox_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the HFM model on the KKBox_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -38,11 +38,11 @@ Dataset ID: [KKBox_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pre
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [HFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/HFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [HFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/HFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/HFM/HFM_movielenslatest_x1/README.md b/ranking/ctr/HFM/HFM_movielenslatest_x1/README.md
index 93195dbe..3db7482e 100644
--- a/ranking/ctr/HFM/HFM_movielenslatest_x1/README.md
+++ b/ranking/ctr/HFM/HFM_movielenslatest_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the HFM model on the Movielenslatest_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Movielenslatest_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/MovieLens/README.md#Movielenslatest_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [MovielensLatest_x1](https://github.com/reczoo/Datasets/tree/main/MovieLens/MovielensLatest_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [HFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/HFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [HFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/HFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/HOFM/HOFM_avazu_x1/README.md b/ranking/ctr/HOFM/HOFM_avazu_x1/README.md
index 70e7473a..e7f0b407 100644
--- a/ranking/ctr/HOFM/HOFM_avazu_x1/README.md
+++ b/ranking/ctr/HOFM/HOFM_avazu_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the HOFM model on the Avazu_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x1](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [HOFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/HOFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [HOFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/HOFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/HOFM/HOFM_avazu_x4_001/README.md b/ranking/ctr/HOFM/HOFM_avazu_x4_001/README.md
index ed377bdc..0f22fb46 100644
--- a/ranking/ctr/HOFM/HOFM_avazu_x4_001/README.md
+++ b/ranking/ctr/HOFM/HOFM_avazu_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the HOFM model on the Avazu_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [HOFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/HOFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [HOFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/HOFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/HOFM/HOFM_avazu_x4_002/README.md b/ranking/ctr/HOFM/HOFM_avazu_x4_002/README.md
index 12f0cf03..0cd9bed0 100644
--- a/ranking/ctr/HOFM/HOFM_avazu_x4_002/README.md
+++ b/ranking/ctr/HOFM/HOFM_avazu_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the HOFM model on the Avazu_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [HOFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/HOFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [HOFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/HOFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/HOFM/HOFM_criteo_x1/README.md b/ranking/ctr/HOFM/HOFM_criteo_x1/README.md
index f28c3850..cfe25448 100644
--- a/ranking/ctr/HOFM/HOFM_criteo_x1/README.md
+++ b/ranking/ctr/HOFM/HOFM_criteo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the HOFM model on the Criteo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x1](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [HOFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/HOFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [HOFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/HOFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/HOFM/HOFM_criteo_x4_001/README.md b/ranking/ctr/HOFM/HOFM_criteo_x4_001/README.md
index d7fd82c8..6e623753 100644
--- a/ranking/ctr/HOFM/HOFM_criteo_x4_001/README.md
+++ b/ranking/ctr/HOFM/HOFM_criteo_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the HOFM model on the Criteo_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [HOFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/HOFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [HOFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/HOFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/HOFM/HOFM_criteo_x4_002/README.md b/ranking/ctr/HOFM/HOFM_criteo_x4_002/README.md
index b786f30c..4b833362 100644
--- a/ranking/ctr/HOFM/HOFM_criteo_x4_002/README.md
+++ b/ranking/ctr/HOFM/HOFM_criteo_x4_002/README.md
@@ -1,274 +1,274 @@
-## HOFM_criteo_x4_002
-
-A hands-on guide to run the HOFM model on the Criteo_x4_002 dataset.
-
-Author: [XUEPAI](https://github.com/xue-pai)
-
-### Index
-[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
-
-### Environments
-+ Hardware
-
- ```python
- CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
- GPU: Tesla V100 32G
- RAM: 755G
-
- ```
-
-+ Software
-
- ```python
- CUDA: 10.2
- python: 3.6.4
- pytorch: 1.0.0
- pandas: 0.22.0
- numpy: 1.19.2
- scipy: 1.5.4
- sklearn: 0.22.1
- pyyaml: 5.4.1
- h5py: 2.8.0
- tqdm: 4.60.0
- fuxictr: 1.0.2
- ```
-
-### Dataset
-Dataset ID: [Criteo_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_002). Please refer to the dataset details to get data ready.
-
-### Code
-
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [HOFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/HOFM.py).
-
-Running steps:
-
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
-
- ```python
- sys.path.append('YOUR_PATH_TO_FuxiCTR/')
- ```
-
-2. Create a data directory and put the downloaded csv files in `../data/Criteo/Criteo_x4`.
-
-3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [HOFM_criteo_x4_tuner_config_05](./HOFM_criteo_x4_tuner_config_05). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
-
-4. Run the following script to start.
-
- ```bash
- cd HOFM_criteo_x4_002
- nohup python run_expid.py --config ./HOFM_criteo_x4_tuner_config_05 --expid HOFM_criteo_x4_001_a187c06d --gpu 0 > run.log &
- tail -f run.log
- ```
-
-### Results
-
-| logloss | AUC |
-|:--------------------:|:--------------------:|
-| 0.440415 | 0.811455 |
-
-
-### Logs
-```python
-2020-02-25 12:03:08,272 P590 INFO {
- "batch_size": "3000",
- "dataset_id": "criteo_x4_001_be98441d",
- "embedding_dim": "[40, 5]",
- "embedding_dropout": "0",
- "epochs": "100",
- "every_x_epochs": "1",
- "learning_rate": "0.001",
- "loss": "binary_crossentropy",
- "metrics": "['logloss', 'AUC']",
- "model": "HOFM",
- "model_id": "HOFM_criteo_x4_001_f22c1010",
- "model_root": "./Criteo/HOFM_criteo/",
- "monitor": "{'AUC': 1, 'logloss': -1}",
- "monitor_mode": "max",
- "optimizer": "adam",
- "order": "3",
- "patience": "2",
- "pickle_feature_encoder": "True",
- "regularizer": "1e-05",
- "reuse_embedding": "False",
- "save_best_only": "True",
- "seed": "2019",
- "shuffle": "True",
- "task": "binary_classification",
- "use_hdf5": "True",
- "verbose": "0",
- "workers": "3",
- "data_format": "h5",
- "data_root": "../data/Criteo/",
- "test_data": "../data/Criteo/criteo_x4_001_be98441d/test.h5",
- "train_data": "../data/Criteo/criteo_x4_001_be98441d/train.h5",
- "valid_data": "../data/Criteo/criteo_x4_001_be98441d/valid.h5",
- "version": "pytorch",
- "gpu": "0"
-}
-2020-02-25 12:03:08,280 P590 INFO Set up feature encoder...
-2020-02-25 12:03:08,280 P590 INFO Load feature_map from json: ../data/Criteo/criteo_x4_001_be98441d/feature_map.json
-2020-02-25 12:03:08,280 P590 INFO Loading data...
-2020-02-25 12:03:08,290 P590 INFO Loading data from h5: ../data/Criteo/criteo_x4_001_be98441d/train.h5
-2020-02-25 12:03:13,000 P590 INFO Loading data from h5: ../data/Criteo/criteo_x4_001_be98441d/valid.h5
-2020-02-25 12:03:14,899 P590 INFO Train samples: total/36672493, pos/9396350, neg/27276143, ratio/25.62%
-2020-02-25 12:03:15,125 P590 INFO Validation samples: total/4584062, pos/1174544, neg/3409518, ratio/25.62%
-2020-02-25 12:03:15,125 P590 INFO Loading train data done.
-2020-02-25 12:03:29,826 P590 INFO **** Start training: 12225 batches/epoch ****
-2020-02-25 13:45:03,226 P590 INFO [Metrics] logloss: 0.452741 - AUC: 0.797658
-2020-02-25 13:45:03,310 P590 INFO Save best model: monitor(max): 0.344917
-2020-02-25 13:45:04,674 P590 INFO --- 12225/12225 batches finished ---
-2020-02-25 13:45:04,730 P590 INFO Train loss: 0.482865
-2020-02-25 13:45:04,730 P590 INFO ************ Epoch=1 end ************
-2020-02-25 15:26:52,314 P590 INFO [Metrics] logloss: 0.452367 - AUC: 0.798152
-2020-02-25 15:26:52,398 P590 INFO Save best model: monitor(max): 0.345785
-2020-02-25 15:26:54,534 P590 INFO --- 12225/12225 batches finished ---
-2020-02-25 15:26:54,606 P590 INFO Train loss: 0.481838
-2020-02-25 15:26:54,607 P590 INFO ************ Epoch=2 end ************
-2020-02-25 17:08:26,380 P590 INFO [Metrics] logloss: 0.451993 - AUC: 0.798611
-2020-02-25 17:08:26,470 P590 INFO Save best model: monitor(max): 0.346618
-2020-02-25 17:08:28,883 P590 INFO --- 12225/12225 batches finished ---
-2020-02-25 17:08:28,957 P590 INFO Train loss: 0.481575
-2020-02-25 17:08:28,957 P590 INFO ************ Epoch=3 end ************
-2020-02-25 18:50:21,243 P590 INFO [Metrics] logloss: 0.452063 - AUC: 0.798448
-2020-02-25 18:50:21,371 P590 INFO Monitor(max) STOP: 0.346385 !
-2020-02-25 18:50:21,371 P590 INFO Reduce learning rate on plateau: 0.000100
-2020-02-25 18:50:21,371 P590 INFO --- 12225/12225 batches finished ---
-2020-02-25 18:50:21,436 P590 INFO Train loss: 0.481441
-2020-02-25 18:50:21,436 P590 INFO ************ Epoch=4 end ************
-2020-02-25 20:32:12,415 P590 INFO [Metrics] logloss: 0.444316 - AUC: 0.807045
-2020-02-25 20:32:12,511 P590 INFO Save best model: monitor(max): 0.362729
-2020-02-25 20:32:14,857 P590 INFO --- 12225/12225 batches finished ---
-2020-02-25 20:32:14,924 P590 INFO Train loss: 0.454585
-2020-02-25 20:32:14,924 P590 INFO ************ Epoch=5 end ************
-2020-02-25 22:14:09,762 P590 INFO [Metrics] logloss: 0.443294 - AUC: 0.808196
-2020-02-25 22:14:09,838 P590 INFO Save best model: monitor(max): 0.364902
-2020-02-25 22:14:12,270 P590 INFO --- 12225/12225 batches finished ---
-2020-02-25 22:14:12,339 P590 INFO Train loss: 0.448891
-2020-02-25 22:14:12,339 P590 INFO ************ Epoch=6 end ************
-2020-02-25 23:56:14,466 P590 INFO [Metrics] logloss: 0.442748 - AUC: 0.808801
-2020-02-25 23:56:14,547 P590 INFO Save best model: monitor(max): 0.366052
-2020-02-25 23:56:16,640 P590 INFO --- 12225/12225 batches finished ---
-2020-02-25 23:56:16,716 P590 INFO Train loss: 0.447550
-2020-02-25 23:56:16,716 P590 INFO ************ Epoch=7 end ************
-2020-02-26 01:40:05,889 P590 INFO [Metrics] logloss: 0.442507 - AUC: 0.809088
-2020-02-26 01:40:05,975 P590 INFO Save best model: monitor(max): 0.366581
-2020-02-26 01:40:08,057 P590 INFO --- 12225/12225 batches finished ---
-2020-02-26 01:40:08,126 P590 INFO Train loss: 0.446761
-2020-02-26 01:40:08,126 P590 INFO ************ Epoch=8 end ************
-2020-02-26 03:23:05,957 P590 INFO [Metrics] logloss: 0.442221 - AUC: 0.809419
-2020-02-26 03:23:06,059 P590 INFO Save best model: monitor(max): 0.367197
-2020-02-26 03:23:08,382 P590 INFO --- 12225/12225 batches finished ---
-2020-02-26 03:23:08,442 P590 INFO Train loss: 0.446199
-2020-02-26 03:23:08,442 P590 INFO ************ Epoch=9 end ************
-2020-02-26 05:05:25,359 P590 INFO [Metrics] logloss: 0.442074 - AUC: 0.809582
-2020-02-26 05:05:25,442 P590 INFO Save best model: monitor(max): 0.367508
-2020-02-26 05:05:28,039 P590 INFO --- 12225/12225 batches finished ---
-2020-02-26 05:05:28,133 P590 INFO Train loss: 0.445765
-2020-02-26 05:05:28,133 P590 INFO ************ Epoch=10 end ************
-2020-02-26 06:47:48,549 P590 INFO [Metrics] logloss: 0.441956 - AUC: 0.809719
-2020-02-26 06:47:48,629 P590 INFO Save best model: monitor(max): 0.367764
-2020-02-26 06:47:51,034 P590 INFO --- 12225/12225 batches finished ---
-2020-02-26 06:47:51,098 P590 INFO Train loss: 0.445435
-2020-02-26 06:47:51,098 P590 INFO ************ Epoch=11 end ************
-2020-02-26 08:29:45,645 P590 INFO [Metrics] logloss: 0.441873 - AUC: 0.809806
-2020-02-26 08:29:45,722 P590 INFO Save best model: monitor(max): 0.367933
-2020-02-26 08:29:48,007 P590 INFO --- 12225/12225 batches finished ---
-2020-02-26 08:29:48,110 P590 INFO Train loss: 0.445150
-2020-02-26 08:29:48,110 P590 INFO ************ Epoch=12 end ************
-2020-02-26 10:11:19,699 P590 INFO [Metrics] logloss: 0.441783 - AUC: 0.809884
-2020-02-26 10:11:19,805 P590 INFO Save best model: monitor(max): 0.368100
-2020-02-26 10:11:21,939 P590 INFO --- 12225/12225 batches finished ---
-2020-02-26 10:11:22,006 P590 INFO Train loss: 0.444931
-2020-02-26 10:11:22,006 P590 INFO ************ Epoch=13 end ************
-2020-02-26 11:52:58,495 P590 INFO [Metrics] logloss: 0.441773 - AUC: 0.809927
-2020-02-26 11:52:58,576 P590 INFO Save best model: monitor(max): 0.368153
-2020-02-26 11:53:00,730 P590 INFO --- 12225/12225 batches finished ---
-2020-02-26 11:53:00,803 P590 INFO Train loss: 0.444736
-2020-02-26 11:53:00,803 P590 INFO ************ Epoch=14 end ************
-2020-02-26 13:34:29,840 P590 INFO [Metrics] logloss: 0.441717 - AUC: 0.809983
-2020-02-26 13:34:29,969 P590 INFO Save best model: monitor(max): 0.368266
-2020-02-26 13:34:32,075 P590 INFO --- 12225/12225 batches finished ---
-2020-02-26 13:34:32,157 P590 INFO Train loss: 0.444588
-2020-02-26 13:34:32,157 P590 INFO ************ Epoch=15 end ************
-2020-02-26 15:16:00,784 P590 INFO [Metrics] logloss: 0.441716 - AUC: 0.809997
-2020-02-26 15:16:00,873 P590 INFO Save best model: monitor(max): 0.368280
-2020-02-26 15:16:03,098 P590 INFO --- 12225/12225 batches finished ---
-2020-02-26 15:16:03,179 P590 INFO Train loss: 0.444451
-2020-02-26 15:16:03,179 P590 INFO ************ Epoch=16 end ************
-2020-02-26 16:57:37,487 P590 INFO [Metrics] logloss: 0.441678 - AUC: 0.810023
-2020-02-26 16:57:37,592 P590 INFO Save best model: monitor(max): 0.368345
-2020-02-26 16:57:39,674 P590 INFO --- 12225/12225 batches finished ---
-2020-02-26 16:57:39,787 P590 INFO Train loss: 0.444340
-2020-02-26 16:57:39,788 P590 INFO ************ Epoch=17 end ************
-2020-02-26 18:39:10,375 P590 INFO [Metrics] logloss: 0.441627 - AUC: 0.810087
-2020-02-26 18:39:10,455 P590 INFO Save best model: monitor(max): 0.368460
-2020-02-26 18:39:12,582 P590 INFO --- 12225/12225 batches finished ---
-2020-02-26 18:39:12,655 P590 INFO Train loss: 0.444234
-2020-02-26 18:39:12,655 P590 INFO ************ Epoch=18 end ************
-2020-02-26 20:21:07,949 P590 INFO [Metrics] logloss: 0.441826 - AUC: 0.809940
-2020-02-26 20:21:08,068 P590 INFO Monitor(max) STOP: 0.368114 !
-2020-02-26 20:21:08,068 P590 INFO Reduce learning rate on plateau: 0.000010
-2020-02-26 20:21:08,068 P590 INFO --- 12225/12225 batches finished ---
-2020-02-26 20:21:08,141 P590 INFO Train loss: 0.444146
-2020-02-26 20:21:08,141 P590 INFO ************ Epoch=19 end ************
-2020-02-26 22:02:28,258 P590 INFO [Metrics] logloss: 0.440848 - AUC: 0.810913
-2020-02-26 22:02:28,392 P590 INFO Save best model: monitor(max): 0.370065
-2020-02-26 22:02:30,483 P590 INFO --- 12225/12225 batches finished ---
-2020-02-26 22:02:30,570 P590 INFO Train loss: 0.437630
-2020-02-26 22:02:30,570 P590 INFO ************ Epoch=20 end ************
-2020-02-26 23:44:00,608 P590 INFO [Metrics] logloss: 0.440753 - AUC: 0.811035
-2020-02-26 23:44:00,690 P590 INFO Save best model: monitor(max): 0.370282
-2020-02-26 23:44:02,929 P590 INFO --- 12225/12225 batches finished ---
-2020-02-26 23:44:03,020 P590 INFO Train loss: 0.436947
-2020-02-26 23:44:03,020 P590 INFO ************ Epoch=21 end ************
-2020-02-27 01:25:34,359 P590 INFO [Metrics] logloss: 0.440714 - AUC: 0.811063
-2020-02-27 01:25:34,438 P590 INFO Save best model: monitor(max): 0.370349
-2020-02-27 01:25:36,688 P590 INFO --- 12225/12225 batches finished ---
-2020-02-27 01:25:36,753 P590 INFO Train loss: 0.436734
-2020-02-27 01:25:36,753 P590 INFO ************ Epoch=22 end ************
-2020-02-27 03:08:07,282 P590 INFO [Metrics] logloss: 0.440696 - AUC: 0.811085
-2020-02-27 03:08:07,377 P590 INFO Save best model: monitor(max): 0.370389
-2020-02-27 03:08:09,582 P590 INFO --- 12225/12225 batches finished ---
-2020-02-27 03:08:09,661 P590 INFO Train loss: 0.436594
-2020-02-27 03:08:09,661 P590 INFO ************ Epoch=23 end ************
-2020-02-27 04:49:52,450 P590 INFO [Metrics] logloss: 0.440695 - AUC: 0.811095
-2020-02-27 04:49:52,532 P590 INFO Save best model: monitor(max): 0.370400
-2020-02-27 04:49:54,769 P590 INFO --- 12225/12225 batches finished ---
-2020-02-27 04:49:54,853 P590 INFO Train loss: 0.436488
-2020-02-27 04:49:54,853 P590 INFO ************ Epoch=24 end ************
-2020-02-27 06:31:51,431 P590 INFO [Metrics] logloss: 0.440696 - AUC: 0.811087
-2020-02-27 06:31:51,513 P590 INFO Monitor(max) STOP: 0.370391 !
-2020-02-27 06:31:51,513 P590 INFO Reduce learning rate on plateau: 0.000001
-2020-02-27 06:31:51,513 P590 INFO --- 12225/12225 batches finished ---
-2020-02-27 06:31:51,577 P590 INFO Train loss: 0.436397
-2020-02-27 06:31:51,577 P590 INFO ************ Epoch=25 end ************
-2020-02-27 08:13:44,541 P590 INFO [Metrics] logloss: 0.440684 - AUC: 0.811097
-2020-02-27 08:13:44,625 P590 INFO Save best model: monitor(max): 0.370413
-2020-02-27 08:13:46,818 P590 INFO --- 12225/12225 batches finished ---
-2020-02-27 08:13:46,892 P590 INFO Train loss: 0.435255
-2020-02-27 08:13:46,892 P590 INFO ************ Epoch=26 end ************
-2020-02-27 09:55:32,778 P590 INFO [Metrics] logloss: 0.440687 - AUC: 0.811100
-2020-02-27 09:55:32,878 P590 INFO Monitor(max) STOP: 0.370413 !
-2020-02-27 09:55:32,878 P590 INFO Reduce learning rate on plateau: 0.000001
-2020-02-27 09:55:32,878 P590 INFO --- 12225/12225 batches finished ---
-2020-02-27 09:55:32,943 P590 INFO Train loss: 0.435281
-2020-02-27 09:55:32,943 P590 INFO ************ Epoch=27 end ************
-2020-02-27 11:36:50,355 P590 INFO [Metrics] logloss: 0.440687 - AUC: 0.811098
-2020-02-27 11:36:50,456 P590 INFO Monitor(max) STOP: 0.370410 !
-2020-02-27 11:36:50,456 P590 INFO Reduce learning rate on plateau: 0.000001
-2020-02-27 11:36:50,456 P590 INFO Early stopping at epoch=28
-2020-02-27 11:36:50,456 P590 INFO --- 12225/12225 batches finished ---
-2020-02-27 11:36:50,514 P590 INFO Train loss: 0.435295
-2020-02-27 11:36:50,514 P590 INFO Training finished.
-2020-02-27 11:36:50,514 P590 INFO Load best model: /cache/XXX/FuxiCTR/benchmarks/Criteo/HOFM_criteo/criteo_x4_001_be98441d/HOFM_criteo_x4_001_f22c1010_criteo_x4_001_be98441d_model.ckpt
-2020-02-27 11:36:52,151 P590 INFO ****** Train/validation evaluation ******
-2020-02-27 12:00:59,862 P590 INFO [Metrics] logloss: 0.427715 - AUC: 0.825014
-2020-02-27 12:02:13,083 P590 INFO [Metrics] logloss: 0.440684 - AUC: 0.811097
-2020-02-27 12:02:13,270 P590 INFO ******** Test evaluation ********
-2020-02-27 12:02:13,270 P590 INFO Loading data...
-2020-02-27 12:02:13,270 P590 INFO Loading data from h5: ../data/Criteo/criteo_x4_001_be98441d/test.h5
-2020-02-27 12:02:14,369 P590 INFO Test samples: total/4584062, pos/1174544, neg/3409518, ratio/25.62%
-2020-02-27 12:02:14,370 P590 INFO Loading test data done.
-2020-02-27 12:03:26,781 P590 INFO [Metrics] logloss: 0.440415 - AUC: 0.811455
-
-```
+## HOFM_criteo_x4_002
+
+A hands-on guide to run the HOFM model on the Criteo_x4_002 dataset.
+
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
+
+### Index
+[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
+
+### Environments
++ Hardware
+
+ ```python
+ CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
+ GPU: Tesla V100 32G
+ RAM: 755G
+
+ ```
+
++ Software
+
+ ```python
+ CUDA: 10.2
+ python: 3.6.4
+ pytorch: 1.0.0
+ pandas: 0.22.0
+ numpy: 1.19.2
+ scipy: 1.5.4
+ sklearn: 0.22.1
+ pyyaml: 5.4.1
+ h5py: 2.8.0
+ tqdm: 4.60.0
+ fuxictr: 1.0.2
+ ```
+
+### Dataset
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
+
+### Code
+
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [HOFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/HOFM.py).
+
+Running steps:
+
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+
+ ```python
+ sys.path.append('YOUR_PATH_TO_FuxiCTR/')
+ ```
+
+2. Create a data directory and put the downloaded csv files in `../data/Criteo/Criteo_x4`.
+
+3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [HOFM_criteo_x4_tuner_config_05](./HOFM_criteo_x4_tuner_config_05). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
+
+4. Run the following script to start.
+
+ ```bash
+ cd HOFM_criteo_x4_002
+ nohup python run_expid.py --config ./HOFM_criteo_x4_tuner_config_05 --expid HOFM_criteo_x4_001_a187c06d --gpu 0 > run.log &
+ tail -f run.log
+ ```
+
+### Results
+
+| logloss | AUC |
+|:--------------------:|:--------------------:|
+| 0.440415 | 0.811455 |
+
+
+### Logs
+```python
+2020-02-25 12:03:08,272 P590 INFO {
+ "batch_size": "3000",
+ "dataset_id": "criteo_x4_001_be98441d",
+ "embedding_dim": "[40, 5]",
+ "embedding_dropout": "0",
+ "epochs": "100",
+ "every_x_epochs": "1",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "metrics": "['logloss', 'AUC']",
+ "model": "HOFM",
+ "model_id": "HOFM_criteo_x4_001_f22c1010",
+ "model_root": "./Criteo/HOFM_criteo/",
+ "monitor": "{'AUC': 1, 'logloss': -1}",
+ "monitor_mode": "max",
+ "optimizer": "adam",
+ "order": "3",
+ "patience": "2",
+ "pickle_feature_encoder": "True",
+ "regularizer": "1e-05",
+ "reuse_embedding": "False",
+ "save_best_only": "True",
+ "seed": "2019",
+ "shuffle": "True",
+ "task": "binary_classification",
+ "use_hdf5": "True",
+ "verbose": "0",
+ "workers": "3",
+ "data_format": "h5",
+ "data_root": "../data/Criteo/",
+ "test_data": "../data/Criteo/criteo_x4_001_be98441d/test.h5",
+ "train_data": "../data/Criteo/criteo_x4_001_be98441d/train.h5",
+ "valid_data": "../data/Criteo/criteo_x4_001_be98441d/valid.h5",
+ "version": "pytorch",
+ "gpu": "0"
+}
+2020-02-25 12:03:08,280 P590 INFO Set up feature encoder...
+2020-02-25 12:03:08,280 P590 INFO Load feature_map from json: ../data/Criteo/criteo_x4_001_be98441d/feature_map.json
+2020-02-25 12:03:08,280 P590 INFO Loading data...
+2020-02-25 12:03:08,290 P590 INFO Loading data from h5: ../data/Criteo/criteo_x4_001_be98441d/train.h5
+2020-02-25 12:03:13,000 P590 INFO Loading data from h5: ../data/Criteo/criteo_x4_001_be98441d/valid.h5
+2020-02-25 12:03:14,899 P590 INFO Train samples: total/36672493, pos/9396350, neg/27276143, ratio/25.62%
+2020-02-25 12:03:15,125 P590 INFO Validation samples: total/4584062, pos/1174544, neg/3409518, ratio/25.62%
+2020-02-25 12:03:15,125 P590 INFO Loading train data done.
+2020-02-25 12:03:29,826 P590 INFO **** Start training: 12225 batches/epoch ****
+2020-02-25 13:45:03,226 P590 INFO [Metrics] logloss: 0.452741 - AUC: 0.797658
+2020-02-25 13:45:03,310 P590 INFO Save best model: monitor(max): 0.344917
+2020-02-25 13:45:04,674 P590 INFO --- 12225/12225 batches finished ---
+2020-02-25 13:45:04,730 P590 INFO Train loss: 0.482865
+2020-02-25 13:45:04,730 P590 INFO ************ Epoch=1 end ************
+2020-02-25 15:26:52,314 P590 INFO [Metrics] logloss: 0.452367 - AUC: 0.798152
+2020-02-25 15:26:52,398 P590 INFO Save best model: monitor(max): 0.345785
+2020-02-25 15:26:54,534 P590 INFO --- 12225/12225 batches finished ---
+2020-02-25 15:26:54,606 P590 INFO Train loss: 0.481838
+2020-02-25 15:26:54,607 P590 INFO ************ Epoch=2 end ************
+2020-02-25 17:08:26,380 P590 INFO [Metrics] logloss: 0.451993 - AUC: 0.798611
+2020-02-25 17:08:26,470 P590 INFO Save best model: monitor(max): 0.346618
+2020-02-25 17:08:28,883 P590 INFO --- 12225/12225 batches finished ---
+2020-02-25 17:08:28,957 P590 INFO Train loss: 0.481575
+2020-02-25 17:08:28,957 P590 INFO ************ Epoch=3 end ************
+2020-02-25 18:50:21,243 P590 INFO [Metrics] logloss: 0.452063 - AUC: 0.798448
+2020-02-25 18:50:21,371 P590 INFO Monitor(max) STOP: 0.346385 !
+2020-02-25 18:50:21,371 P590 INFO Reduce learning rate on plateau: 0.000100
+2020-02-25 18:50:21,371 P590 INFO --- 12225/12225 batches finished ---
+2020-02-25 18:50:21,436 P590 INFO Train loss: 0.481441
+2020-02-25 18:50:21,436 P590 INFO ************ Epoch=4 end ************
+2020-02-25 20:32:12,415 P590 INFO [Metrics] logloss: 0.444316 - AUC: 0.807045
+2020-02-25 20:32:12,511 P590 INFO Save best model: monitor(max): 0.362729
+2020-02-25 20:32:14,857 P590 INFO --- 12225/12225 batches finished ---
+2020-02-25 20:32:14,924 P590 INFO Train loss: 0.454585
+2020-02-25 20:32:14,924 P590 INFO ************ Epoch=5 end ************
+2020-02-25 22:14:09,762 P590 INFO [Metrics] logloss: 0.443294 - AUC: 0.808196
+2020-02-25 22:14:09,838 P590 INFO Save best model: monitor(max): 0.364902
+2020-02-25 22:14:12,270 P590 INFO --- 12225/12225 batches finished ---
+2020-02-25 22:14:12,339 P590 INFO Train loss: 0.448891
+2020-02-25 22:14:12,339 P590 INFO ************ Epoch=6 end ************
+2020-02-25 23:56:14,466 P590 INFO [Metrics] logloss: 0.442748 - AUC: 0.808801
+2020-02-25 23:56:14,547 P590 INFO Save best model: monitor(max): 0.366052
+2020-02-25 23:56:16,640 P590 INFO --- 12225/12225 batches finished ---
+2020-02-25 23:56:16,716 P590 INFO Train loss: 0.447550
+2020-02-25 23:56:16,716 P590 INFO ************ Epoch=7 end ************
+2020-02-26 01:40:05,889 P590 INFO [Metrics] logloss: 0.442507 - AUC: 0.809088
+2020-02-26 01:40:05,975 P590 INFO Save best model: monitor(max): 0.366581
+2020-02-26 01:40:08,057 P590 INFO --- 12225/12225 batches finished ---
+2020-02-26 01:40:08,126 P590 INFO Train loss: 0.446761
+2020-02-26 01:40:08,126 P590 INFO ************ Epoch=8 end ************
+2020-02-26 03:23:05,957 P590 INFO [Metrics] logloss: 0.442221 - AUC: 0.809419
+2020-02-26 03:23:06,059 P590 INFO Save best model: monitor(max): 0.367197
+2020-02-26 03:23:08,382 P590 INFO --- 12225/12225 batches finished ---
+2020-02-26 03:23:08,442 P590 INFO Train loss: 0.446199
+2020-02-26 03:23:08,442 P590 INFO ************ Epoch=9 end ************
+2020-02-26 05:05:25,359 P590 INFO [Metrics] logloss: 0.442074 - AUC: 0.809582
+2020-02-26 05:05:25,442 P590 INFO Save best model: monitor(max): 0.367508
+2020-02-26 05:05:28,039 P590 INFO --- 12225/12225 batches finished ---
+2020-02-26 05:05:28,133 P590 INFO Train loss: 0.445765
+2020-02-26 05:05:28,133 P590 INFO ************ Epoch=10 end ************
+2020-02-26 06:47:48,549 P590 INFO [Metrics] logloss: 0.441956 - AUC: 0.809719
+2020-02-26 06:47:48,629 P590 INFO Save best model: monitor(max): 0.367764
+2020-02-26 06:47:51,034 P590 INFO --- 12225/12225 batches finished ---
+2020-02-26 06:47:51,098 P590 INFO Train loss: 0.445435
+2020-02-26 06:47:51,098 P590 INFO ************ Epoch=11 end ************
+2020-02-26 08:29:45,645 P590 INFO [Metrics] logloss: 0.441873 - AUC: 0.809806
+2020-02-26 08:29:45,722 P590 INFO Save best model: monitor(max): 0.367933
+2020-02-26 08:29:48,007 P590 INFO --- 12225/12225 batches finished ---
+2020-02-26 08:29:48,110 P590 INFO Train loss: 0.445150
+2020-02-26 08:29:48,110 P590 INFO ************ Epoch=12 end ************
+2020-02-26 10:11:19,699 P590 INFO [Metrics] logloss: 0.441783 - AUC: 0.809884
+2020-02-26 10:11:19,805 P590 INFO Save best model: monitor(max): 0.368100
+2020-02-26 10:11:21,939 P590 INFO --- 12225/12225 batches finished ---
+2020-02-26 10:11:22,006 P590 INFO Train loss: 0.444931
+2020-02-26 10:11:22,006 P590 INFO ************ Epoch=13 end ************
+2020-02-26 11:52:58,495 P590 INFO [Metrics] logloss: 0.441773 - AUC: 0.809927
+2020-02-26 11:52:58,576 P590 INFO Save best model: monitor(max): 0.368153
+2020-02-26 11:53:00,730 P590 INFO --- 12225/12225 batches finished ---
+2020-02-26 11:53:00,803 P590 INFO Train loss: 0.444736
+2020-02-26 11:53:00,803 P590 INFO ************ Epoch=14 end ************
+2020-02-26 13:34:29,840 P590 INFO [Metrics] logloss: 0.441717 - AUC: 0.809983
+2020-02-26 13:34:29,969 P590 INFO Save best model: monitor(max): 0.368266
+2020-02-26 13:34:32,075 P590 INFO --- 12225/12225 batches finished ---
+2020-02-26 13:34:32,157 P590 INFO Train loss: 0.444588
+2020-02-26 13:34:32,157 P590 INFO ************ Epoch=15 end ************
+2020-02-26 15:16:00,784 P590 INFO [Metrics] logloss: 0.441716 - AUC: 0.809997
+2020-02-26 15:16:00,873 P590 INFO Save best model: monitor(max): 0.368280
+2020-02-26 15:16:03,098 P590 INFO --- 12225/12225 batches finished ---
+2020-02-26 15:16:03,179 P590 INFO Train loss: 0.444451
+2020-02-26 15:16:03,179 P590 INFO ************ Epoch=16 end ************
+2020-02-26 16:57:37,487 P590 INFO [Metrics] logloss: 0.441678 - AUC: 0.810023
+2020-02-26 16:57:37,592 P590 INFO Save best model: monitor(max): 0.368345
+2020-02-26 16:57:39,674 P590 INFO --- 12225/12225 batches finished ---
+2020-02-26 16:57:39,787 P590 INFO Train loss: 0.444340
+2020-02-26 16:57:39,788 P590 INFO ************ Epoch=17 end ************
+2020-02-26 18:39:10,375 P590 INFO [Metrics] logloss: 0.441627 - AUC: 0.810087
+2020-02-26 18:39:10,455 P590 INFO Save best model: monitor(max): 0.368460
+2020-02-26 18:39:12,582 P590 INFO --- 12225/12225 batches finished ---
+2020-02-26 18:39:12,655 P590 INFO Train loss: 0.444234
+2020-02-26 18:39:12,655 P590 INFO ************ Epoch=18 end ************
+2020-02-26 20:21:07,949 P590 INFO [Metrics] logloss: 0.441826 - AUC: 0.809940
+2020-02-26 20:21:08,068 P590 INFO Monitor(max) STOP: 0.368114 !
+2020-02-26 20:21:08,068 P590 INFO Reduce learning rate on plateau: 0.000010
+2020-02-26 20:21:08,068 P590 INFO --- 12225/12225 batches finished ---
+2020-02-26 20:21:08,141 P590 INFO Train loss: 0.444146
+2020-02-26 20:21:08,141 P590 INFO ************ Epoch=19 end ************
+2020-02-26 22:02:28,258 P590 INFO [Metrics] logloss: 0.440848 - AUC: 0.810913
+2020-02-26 22:02:28,392 P590 INFO Save best model: monitor(max): 0.370065
+2020-02-26 22:02:30,483 P590 INFO --- 12225/12225 batches finished ---
+2020-02-26 22:02:30,570 P590 INFO Train loss: 0.437630
+2020-02-26 22:02:30,570 P590 INFO ************ Epoch=20 end ************
+2020-02-26 23:44:00,608 P590 INFO [Metrics] logloss: 0.440753 - AUC: 0.811035
+2020-02-26 23:44:00,690 P590 INFO Save best model: monitor(max): 0.370282
+2020-02-26 23:44:02,929 P590 INFO --- 12225/12225 batches finished ---
+2020-02-26 23:44:03,020 P590 INFO Train loss: 0.436947
+2020-02-26 23:44:03,020 P590 INFO ************ Epoch=21 end ************
+2020-02-27 01:25:34,359 P590 INFO [Metrics] logloss: 0.440714 - AUC: 0.811063
+2020-02-27 01:25:34,438 P590 INFO Save best model: monitor(max): 0.370349
+2020-02-27 01:25:36,688 P590 INFO --- 12225/12225 batches finished ---
+2020-02-27 01:25:36,753 P590 INFO Train loss: 0.436734
+2020-02-27 01:25:36,753 P590 INFO ************ Epoch=22 end ************
+2020-02-27 03:08:07,282 P590 INFO [Metrics] logloss: 0.440696 - AUC: 0.811085
+2020-02-27 03:08:07,377 P590 INFO Save best model: monitor(max): 0.370389
+2020-02-27 03:08:09,582 P590 INFO --- 12225/12225 batches finished ---
+2020-02-27 03:08:09,661 P590 INFO Train loss: 0.436594
+2020-02-27 03:08:09,661 P590 INFO ************ Epoch=23 end ************
+2020-02-27 04:49:52,450 P590 INFO [Metrics] logloss: 0.440695 - AUC: 0.811095
+2020-02-27 04:49:52,532 P590 INFO Save best model: monitor(max): 0.370400
+2020-02-27 04:49:54,769 P590 INFO --- 12225/12225 batches finished ---
+2020-02-27 04:49:54,853 P590 INFO Train loss: 0.436488
+2020-02-27 04:49:54,853 P590 INFO ************ Epoch=24 end ************
+2020-02-27 06:31:51,431 P590 INFO [Metrics] logloss: 0.440696 - AUC: 0.811087
+2020-02-27 06:31:51,513 P590 INFO Monitor(max) STOP: 0.370391 !
+2020-02-27 06:31:51,513 P590 INFO Reduce learning rate on plateau: 0.000001
+2020-02-27 06:31:51,513 P590 INFO --- 12225/12225 batches finished ---
+2020-02-27 06:31:51,577 P590 INFO Train loss: 0.436397
+2020-02-27 06:31:51,577 P590 INFO ************ Epoch=25 end ************
+2020-02-27 08:13:44,541 P590 INFO [Metrics] logloss: 0.440684 - AUC: 0.811097
+2020-02-27 08:13:44,625 P590 INFO Save best model: monitor(max): 0.370413
+2020-02-27 08:13:46,818 P590 INFO --- 12225/12225 batches finished ---
+2020-02-27 08:13:46,892 P590 INFO Train loss: 0.435255
+2020-02-27 08:13:46,892 P590 INFO ************ Epoch=26 end ************
+2020-02-27 09:55:32,778 P590 INFO [Metrics] logloss: 0.440687 - AUC: 0.811100
+2020-02-27 09:55:32,878 P590 INFO Monitor(max) STOP: 0.370413 !
+2020-02-27 09:55:32,878 P590 INFO Reduce learning rate on plateau: 0.000001
+2020-02-27 09:55:32,878 P590 INFO --- 12225/12225 batches finished ---
+2020-02-27 09:55:32,943 P590 INFO Train loss: 0.435281
+2020-02-27 09:55:32,943 P590 INFO ************ Epoch=27 end ************
+2020-02-27 11:36:50,355 P590 INFO [Metrics] logloss: 0.440687 - AUC: 0.811098
+2020-02-27 11:36:50,456 P590 INFO Monitor(max) STOP: 0.370410 !
+2020-02-27 11:36:50,456 P590 INFO Reduce learning rate on plateau: 0.000001
+2020-02-27 11:36:50,456 P590 INFO Early stopping at epoch=28
+2020-02-27 11:36:50,456 P590 INFO --- 12225/12225 batches finished ---
+2020-02-27 11:36:50,514 P590 INFO Train loss: 0.435295
+2020-02-27 11:36:50,514 P590 INFO Training finished.
+2020-02-27 11:36:50,514 P590 INFO Load best model: /cache/XXX/FuxiCTR/benchmarks/Criteo/HOFM_criteo/criteo_x4_001_be98441d/HOFM_criteo_x4_001_f22c1010_criteo_x4_001_be98441d_model.ckpt
+2020-02-27 11:36:52,151 P590 INFO ****** Train/validation evaluation ******
+2020-02-27 12:00:59,862 P590 INFO [Metrics] logloss: 0.427715 - AUC: 0.825014
+2020-02-27 12:02:13,083 P590 INFO [Metrics] logloss: 0.440684 - AUC: 0.811097
+2020-02-27 12:02:13,270 P590 INFO ******** Test evaluation ********
+2020-02-27 12:02:13,270 P590 INFO Loading data...
+2020-02-27 12:02:13,270 P590 INFO Loading data from h5: ../data/Criteo/criteo_x4_001_be98441d/test.h5
+2020-02-27 12:02:14,369 P590 INFO Test samples: total/4584062, pos/1174544, neg/3409518, ratio/25.62%
+2020-02-27 12:02:14,370 P590 INFO Loading test data done.
+2020-02-27 12:03:26,781 P590 INFO [Metrics] logloss: 0.440415 - AUC: 0.811455
+
+```
diff --git a/ranking/ctr/HOFM/HOFM_frappe_x1/README.md b/ranking/ctr/HOFM/HOFM_frappe_x1/README.md
index b64cdb3e..c7b4d3dd 100644
--- a/ranking/ctr/HOFM/HOFM_frappe_x1/README.md
+++ b/ranking/ctr/HOFM/HOFM_frappe_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the HOFM model on the Frappe_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Frappe_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Frappe/README.md#Frappe_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Frappe_x1](https://github.com/reczoo/Datasets/tree/main/Frappe/Frappe_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [HOFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/HOFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [HOFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/HOFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/HOFM/HOFM_kkbox_x1/README.md b/ranking/ctr/HOFM/HOFM_kkbox_x1/README.md
index e12c7e2f..08cdec6e 100644
--- a/ranking/ctr/HOFM/HOFM_kkbox_x1/README.md
+++ b/ranking/ctr/HOFM/HOFM_kkbox_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the HOFM model on the KKBox_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -38,11 +38,11 @@ Dataset ID: [KKBox_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pre
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [HOFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/HOFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [HOFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/HOFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/HOFM/HOFM_movielenslatest_x1/README.md b/ranking/ctr/HOFM/HOFM_movielenslatest_x1/README.md
index 3627b6ec..99e30582 100644
--- a/ranking/ctr/HOFM/HOFM_movielenslatest_x1/README.md
+++ b/ranking/ctr/HOFM/HOFM_movielenslatest_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the HOFM model on the Movielenslatest_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Movielenslatest_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/MovieLens/README.md#Movielenslatest_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [MovielensLatest_x1](https://github.com/reczoo/Datasets/tree/main/MovieLens/MovielensLatest_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [HOFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/HOFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [HOFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/HOFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/InterHAt/InterHAt_avazu_x4_001/README.md b/ranking/ctr/InterHAt/InterHAt_avazu_x4_001/README.md
index 7108754f..da17488d 100644
--- a/ranking/ctr/InterHAt/InterHAt_avazu_x4_001/README.md
+++ b/ranking/ctr/InterHAt/InterHAt_avazu_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the InterHAt model on the Avazu_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [InterHAt](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/InterHAt.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [InterHAt](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/InterHAt.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/InterHAt/InterHAt_avazu_x4_002/README.md b/ranking/ctr/InterHAt/InterHAt_avazu_x4_002/README.md
index b20fd52a..bb82b511 100644
--- a/ranking/ctr/InterHAt/InterHAt_avazu_x4_002/README.md
+++ b/ranking/ctr/InterHAt/InterHAt_avazu_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the InterHAt model on the Avazu_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [InterHAt](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/InterHAt.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [InterHAt](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/InterHAt.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/InterHAt/InterHAt_criteo_x4_001/README.md b/ranking/ctr/InterHAt/InterHAt_criteo_x4_001/README.md
index f9c03d81..0c73dfbe 100644
--- a/ranking/ctr/InterHAt/InterHAt_criteo_x4_001/README.md
+++ b/ranking/ctr/InterHAt/InterHAt_criteo_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the InterHAt model on the Criteo_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [InterHAt](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/InterHAt.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [InterHAt](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/InterHAt.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/InterHAt/InterHAt_criteo_x4_002/README.md b/ranking/ctr/InterHAt/InterHAt_criteo_x4_002/README.md
index 0a28e6dd..17828e59 100644
--- a/ranking/ctr/InterHAt/InterHAt_criteo_x4_002/README.md
+++ b/ranking/ctr/InterHAt/InterHAt_criteo_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the InterHAt model on the Criteo_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [InterHAt](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/InterHAt.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [InterHAt](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/InterHAt.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/InterHAt/InterHAt_kkbox_x1/README.md b/ranking/ctr/InterHAt/InterHAt_kkbox_x1/README.md
index 0dc5b23b..04682aeb 100644
--- a/ranking/ctr/InterHAt/InterHAt_kkbox_x1/README.md
+++ b/ranking/ctr/InterHAt/InterHAt_kkbox_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the InterHAt model on the KKBox_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [KKBox_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/KKBox/README.md#KKBox_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [KKBox_x1](https://github.com/reczoo/Datasets/tree/main/KKBox/KKBox_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [InterHAt](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/InterHAt.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [InterHAt](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/InterHAt.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/LR/LR_avazu_x1/README.md b/ranking/ctr/LR/LR_avazu_x1/README.md
index 102d0221..3a93a1bd 100644
--- a/ranking/ctr/LR/LR_avazu_x1/README.md
+++ b/ranking/ctr/LR/LR_avazu_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the LR model on the Avazu_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x1](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [LR](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/LR.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [LR](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/LR.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/LR/LR_avazu_x4_001/README.md b/ranking/ctr/LR/LR_avazu_x4_001/README.md
index 248b7fa4..3529c4d1 100644
--- a/ranking/ctr/LR/LR_avazu_x4_001/README.md
+++ b/ranking/ctr/LR/LR_avazu_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the LR model on the Avazu_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [LR](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/LR.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [LR](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/LR.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/LR/LR_avazu_x4_002/README.md b/ranking/ctr/LR/LR_avazu_x4_002/README.md
index b4f21015..1025fdfc 100644
--- a/ranking/ctr/LR/LR_avazu_x4_002/README.md
+++ b/ranking/ctr/LR/LR_avazu_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the LR model on the Avazu_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [LR](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/LR.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [LR](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/LR.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/LR/LR_criteo_x1/README.md b/ranking/ctr/LR/LR_criteo_x1/README.md
index f478ccbd..b948285f 100644
--- a/ranking/ctr/LR/LR_criteo_x1/README.md
+++ b/ranking/ctr/LR/LR_criteo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the LR model on the Criteo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x1](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [LR](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/LR.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [LR](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/LR.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/LR/LR_criteo_x4_001/README.md b/ranking/ctr/LR/LR_criteo_x4_001/README.md
index d9fcf44b..3ea60670 100644
--- a/ranking/ctr/LR/LR_criteo_x4_001/README.md
+++ b/ranking/ctr/LR/LR_criteo_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the LR model on the Criteo_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [LR](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/LR.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [LR](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/LR.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/LR/LR_criteo_x4_002/README.md b/ranking/ctr/LR/LR_criteo_x4_002/README.md
index bebb1f28..9f69e39b 100644
--- a/ranking/ctr/LR/LR_criteo_x4_002/README.md
+++ b/ranking/ctr/LR/LR_criteo_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the LR model on the Criteo_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [LR](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/LR.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [LR](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/LR.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/LR/LR_frappe_x1/README.md b/ranking/ctr/LR/LR_frappe_x1/README.md
index 10737b1c..585a62cf 100644
--- a/ranking/ctr/LR/LR_frappe_x1/README.md
+++ b/ranking/ctr/LR/LR_frappe_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the LR model on the Frappe_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Frappe_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Frappe/README.md#Frappe_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Frappe_x1](https://github.com/reczoo/Datasets/tree/main/Frappe/Frappe_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [LR](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/LR.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [LR](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/LR.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/LR/LR_kkbox_x1/README.md b/ranking/ctr/LR/LR_kkbox_x1/README.md
index 47aaf24b..5e81219c 100644
--- a/ranking/ctr/LR/LR_kkbox_x1/README.md
+++ b/ranking/ctr/LR/LR_kkbox_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the LR model on the KKBox_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [KKBox_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/KKBox/README.md#KKBox_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [KKBox_x1](https://github.com/reczoo/Datasets/tree/main/KKBox/KKBox_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [LR](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/LR.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [LR](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/LR.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/LR/LR_movielenslatest_x1/README.md b/ranking/ctr/LR/LR_movielenslatest_x1/README.md
index 63db836b..32c5f162 100644
--- a/ranking/ctr/LR/LR_movielenslatest_x1/README.md
+++ b/ranking/ctr/LR/LR_movielenslatest_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the LR model on the Movielenslatest_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Movielenslatest_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/MovieLens/README.md#Movielenslatest_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [MovielensLatest_x1](https://github.com/reczoo/Datasets/tree/main/MovieLens/MovielensLatest_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [LR](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/LR.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [LR](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/LR.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/LorentzFM/LorentzFM_avazu_x4_001/README.md b/ranking/ctr/LorentzFM/LorentzFM_avazu_x4_001/README.md
index f7778ef6..2b6303c1 100644
--- a/ranking/ctr/LorentzFM/LorentzFM_avazu_x4_001/README.md
+++ b/ranking/ctr/LorentzFM/LorentzFM_avazu_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the LorentzFM model on the Avazu_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [LorentzFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/LorentzFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [LorentzFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/LorentzFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/LorentzFM/LorentzFM_avazu_x4_002/README.md b/ranking/ctr/LorentzFM/LorentzFM_avazu_x4_002/README.md
index 44f87767..29183119 100644
--- a/ranking/ctr/LorentzFM/LorentzFM_avazu_x4_002/README.md
+++ b/ranking/ctr/LorentzFM/LorentzFM_avazu_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the LorentzFM model on the Avazu_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [LorentzFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/LorentzFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [LorentzFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/LorentzFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/LorentzFM/LorentzFM_criteo_x4_001/README.md b/ranking/ctr/LorentzFM/LorentzFM_criteo_x4_001/README.md
index 701e3ef1..dd1f1cc4 100644
--- a/ranking/ctr/LorentzFM/LorentzFM_criteo_x4_001/README.md
+++ b/ranking/ctr/LorentzFM/LorentzFM_criteo_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the LorentzFM model on the Criteo_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [LorentzFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/LorentzFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [LorentzFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/LorentzFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/LorentzFM/LorentzFM_criteo_x4_002/README.md b/ranking/ctr/LorentzFM/LorentzFM_criteo_x4_002/README.md
index 21185719..57148ac0 100644
--- a/ranking/ctr/LorentzFM/LorentzFM_criteo_x4_002/README.md
+++ b/ranking/ctr/LorentzFM/LorentzFM_criteo_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the LorentzFM model on the Criteo_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [LorentzFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/LorentzFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [LorentzFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/LorentzFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/LorentzFM/LorentzFM_kkbox_x1/README.md b/ranking/ctr/LorentzFM/LorentzFM_kkbox_x1/README.md
index d450388b..71950d57 100644
--- a/ranking/ctr/LorentzFM/LorentzFM_kkbox_x1/README.md
+++ b/ranking/ctr/LorentzFM/LorentzFM_kkbox_x1/README.md
@@ -1,296 +1,296 @@
-## LorentzFM_kkbox_x1
-
-A hands-on guide to run the LorentzFM model on the KKBox_x1 dataset.
-
-Author: [XUEPAI](https://github.com/xue-pai)
-
-### Index
-[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
-
-### Environments
-+ Hardware
-
- ```python
- CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
- GPU: Tesla V100 32G
- RAM: 755G
-
- ```
-
-+ Software
-
- ```python
- CUDA: 10.2
- python: 3.6.4
- pytorch: 1.0.0
- pandas: 0.22.0
- numpy: 1.19.2
- scipy: 1.5.4
- sklearn: 0.22.1
- pyyaml: 5.4.1
- h5py: 2.8.0
- tqdm: 4.60.0
- fuxictr: 1.0.2
- ```
-
-### Dataset
-Dataset ID: [KKBox_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/KKBox#KKBox_x1). Please refer to the dataset details to get data ready.
-
-### Code
-
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [LorentzFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/LorentzFM.py).
-
-Running steps:
-
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
-
- ```python
- sys.path.append('YOUR_PATH_TO_FuxiCTR/')
- ```
-
-2. Create a data directory and put the downloaded csv files in `../data/KKBox/KKBox_x1`.
-
-3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [LorentzFM_kkbox_x1_tuner_config_02](./LorentzFM_kkbox_x1_tuner_config_02). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
-
-4. Run the following script to start.
-
- ```bash
- cd LorentzFM_kkbox_x1
- nohup python run_expid.py --config ./LorentzFM_kkbox_x1_tuner_config_02 --expid LorentzFM_kkbox_x1_003_0455bdef --gpu 0 > run.log &
- tail -f run.log
- ```
-
-### Results
-
-| logloss | AUC |
-|:--------------------:|:--------------------:|
-| 0.518848 | 0.820207 |
-
-
-### Logs
-```python
-2022-03-10 10:17:07,540 P41332 INFO {
- "batch_size": "10000",
- "data_format": "csv",
- "data_root": "../data/KKBox/",
- "dataset_id": "kkbox_x1_227d337d",
- "debug": "False",
- "embedding_dim": "128",
- "embedding_dropout": "0",
- "epochs": "100",
- "every_x_epochs": "1",
- "feature_cols": "[{'active': True, 'dtype': 'str', 'name': ['msno', 'song_id', 'source_system_tab', 'source_screen_name', 'source_type', 'city', 'gender', 'registered_via', 'language'], 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'encoder': 'MaskedSumPooling', 'max_len': 3, 'name': 'genre_ids', 'type': 'sequence'}, {'active': True, 'dtype': 'str', 'encoder': 'MaskedSumPooling', 'max_len': 3, 'name': 'artist_name', 'type': 'sequence'}, {'active': True, 'dtype': 'str', 'name': 'isrc', 'preprocess': 'extract_country_code', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': 'bd', 'preprocess': 'bucketize_age', 'type': 'categorical'}]",
- "gpu": "2",
- "label_col": "{'dtype': 'float', 'name': 'label'}",
- "learning_rate": "0.001",
- "loss": "binary_crossentropy",
- "metrics": "['logloss', 'AUC']",
- "min_categr_count": "10",
- "model": "LorentzFM",
- "model_id": "LorentzFM_kkbox_x1_003_0455bdef",
- "model_root": "./KKBox/LorentzFM_kkbox_x1/",
- "monitor": "{'AUC': 1, 'logloss': -1}",
- "monitor_mode": "max",
- "optimizer": "adam",
- "patience": "2",
- "pickle_feature_encoder": "True",
- "regularizer": "1e-06",
- "save_best_only": "True",
- "seed": "2019",
- "shuffle": "True",
- "task": "binary_classification",
- "test_data": "../data/KKBox/KKBox_x1/test.csv",
- "train_data": "../data/KKBox/KKBox_x1/train.csv",
- "use_hdf5": "True",
- "valid_data": "../data/KKBox/KKBox_x1/valid.csv",
- "verbose": "1",
- "version": "pytorch",
- "workers": "3"
-}
-2022-03-10 10:17:07,541 P41332 INFO Set up feature encoder...
-2022-03-10 10:17:07,542 P41332 INFO Load feature_encoder from pickle: ../data/KKBox/kkbox_x1_227d337d/feature_encoder.pkl
-2022-03-10 10:17:08,469 P41332 INFO Total number of parameters: 11807616.
-2022-03-10 10:17:08,469 P41332 INFO Loading data...
-2022-03-10 10:17:08,470 P41332 INFO Loading data from h5: ../data/KKBox/kkbox_x1_227d337d/train.h5
-2022-03-10 10:17:08,832 P41332 INFO Loading data from h5: ../data/KKBox/kkbox_x1_227d337d/valid.h5
-2022-03-10 10:17:09,051 P41332 INFO Train samples: total/5901932, pos/2971724, neg/2930208, ratio/50.35%
-2022-03-10 10:17:09,073 P41332 INFO Validation samples: total/737743, pos/371466, neg/366277, ratio/50.35%
-2022-03-10 10:17:09,073 P41332 INFO Loading train data done.
-2022-03-10 10:17:13,055 P41332 INFO Start training: 591 batches/epoch
-2022-03-10 10:17:13,056 P41332 INFO ************ Epoch=1 start ************
-2022-03-10 10:17:47,611 P41332 INFO [Metrics] logloss: 0.561536 - AUC: 0.781421
-2022-03-10 10:17:47,616 P41332 INFO Save best model: monitor(max): 0.219885
-2022-03-10 10:17:47,942 P41332 INFO --- 591/591 batches finished ---
-2022-03-10 10:17:47,981 P41332 INFO Train loss: 0.589643
-2022-03-10 10:17:47,981 P41332 INFO ************ Epoch=1 end ************
-2022-03-10 10:18:22,008 P41332 INFO [Metrics] logloss: 0.546223 - AUC: 0.795819
-2022-03-10 10:18:22,009 P41332 INFO Save best model: monitor(max): 0.249596
-2022-03-10 10:18:22,066 P41332 INFO --- 591/591 batches finished ---
-2022-03-10 10:18:22,108 P41332 INFO Train loss: 0.550875
-2022-03-10 10:18:22,108 P41332 INFO ************ Epoch=2 end ************
-2022-03-10 10:18:56,662 P41332 INFO [Metrics] logloss: 0.538973 - AUC: 0.802626
-2022-03-10 10:18:56,663 P41332 INFO Save best model: monitor(max): 0.263653
-2022-03-10 10:18:56,730 P41332 INFO --- 591/591 batches finished ---
-2022-03-10 10:18:56,772 P41332 INFO Train loss: 0.536165
-2022-03-10 10:18:56,772 P41332 INFO ************ Epoch=3 end ************
-2022-03-10 10:19:31,345 P41332 INFO [Metrics] logloss: 0.534488 - AUC: 0.806673
-2022-03-10 10:19:31,346 P41332 INFO Save best model: monitor(max): 0.272185
-2022-03-10 10:19:31,406 P41332 INFO --- 591/591 batches finished ---
-2022-03-10 10:19:31,447 P41332 INFO Train loss: 0.526991
-2022-03-10 10:19:31,447 P41332 INFO ************ Epoch=4 end ************
-2022-03-10 10:20:05,741 P41332 INFO [Metrics] logloss: 0.531310 - AUC: 0.809495
-2022-03-10 10:20:05,742 P41332 INFO Save best model: monitor(max): 0.278185
-2022-03-10 10:20:05,794 P41332 INFO --- 591/591 batches finished ---
-2022-03-10 10:20:05,847 P41332 INFO Train loss: 0.520549
-2022-03-10 10:20:05,847 P41332 INFO ************ Epoch=5 end ************
-2022-03-10 10:20:41,958 P41332 INFO [Metrics] logloss: 0.529257 - AUC: 0.811305
-2022-03-10 10:20:41,959 P41332 INFO Save best model: monitor(max): 0.282048
-2022-03-10 10:20:42,018 P41332 INFO --- 591/591 batches finished ---
-2022-03-10 10:20:42,064 P41332 INFO Train loss: 0.516020
-2022-03-10 10:20:42,064 P41332 INFO ************ Epoch=6 end ************
-2022-03-10 10:21:16,199 P41332 INFO [Metrics] logloss: 0.527650 - AUC: 0.812612
-2022-03-10 10:21:16,199 P41332 INFO Save best model: monitor(max): 0.284963
-2022-03-10 10:21:16,259 P41332 INFO --- 591/591 batches finished ---
-2022-03-10 10:21:16,297 P41332 INFO Train loss: 0.512699
-2022-03-10 10:21:16,297 P41332 INFO ************ Epoch=7 end ************
-2022-03-10 10:21:50,137 P41332 INFO [Metrics] logloss: 0.526577 - AUC: 0.813520
-2022-03-10 10:21:50,137 P41332 INFO Save best model: monitor(max): 0.286943
-2022-03-10 10:21:50,201 P41332 INFO --- 591/591 batches finished ---
-2022-03-10 10:21:50,238 P41332 INFO Train loss: 0.510222
-2022-03-10 10:21:50,238 P41332 INFO ************ Epoch=8 end ************
-2022-03-10 10:22:23,579 P41332 INFO [Metrics] logloss: 0.525591 - AUC: 0.814428
-2022-03-10 10:22:23,580 P41332 INFO Save best model: monitor(max): 0.288836
-2022-03-10 10:22:23,639 P41332 INFO --- 591/591 batches finished ---
-2022-03-10 10:22:23,675 P41332 INFO Train loss: 0.508269
-2022-03-10 10:22:23,675 P41332 INFO ************ Epoch=9 end ************
-2022-03-10 10:22:56,807 P41332 INFO [Metrics] logloss: 0.524992 - AUC: 0.814898
-2022-03-10 10:22:56,808 P41332 INFO Save best model: monitor(max): 0.289906
-2022-03-10 10:22:56,868 P41332 INFO --- 591/591 batches finished ---
-2022-03-10 10:22:56,905 P41332 INFO Train loss: 0.506742
-2022-03-10 10:22:56,905 P41332 INFO ************ Epoch=10 end ************
-2022-03-10 10:23:31,662 P41332 INFO [Metrics] logloss: 0.524255 - AUC: 0.815599
-2022-03-10 10:23:31,663 P41332 INFO Save best model: monitor(max): 0.291344
-2022-03-10 10:23:31,733 P41332 INFO --- 591/591 batches finished ---
-2022-03-10 10:23:31,780 P41332 INFO Train loss: 0.505529
-2022-03-10 10:23:31,781 P41332 INFO ************ Epoch=11 end ************
-2022-03-10 10:24:04,648 P41332 INFO [Metrics] logloss: 0.523971 - AUC: 0.815822
-2022-03-10 10:24:04,649 P41332 INFO Save best model: monitor(max): 0.291851
-2022-03-10 10:24:04,707 P41332 INFO --- 591/591 batches finished ---
-2022-03-10 10:24:04,744 P41332 INFO Train loss: 0.504482
-2022-03-10 10:24:04,745 P41332 INFO ************ Epoch=12 end ************
-2022-03-10 10:24:37,700 P41332 INFO [Metrics] logloss: 0.523502 - AUC: 0.816232
-2022-03-10 10:24:37,700 P41332 INFO Save best model: monitor(max): 0.292730
-2022-03-10 10:24:37,756 P41332 INFO --- 591/591 batches finished ---
-2022-03-10 10:24:37,793 P41332 INFO Train loss: 0.503722
-2022-03-10 10:24:37,793 P41332 INFO ************ Epoch=13 end ************
-2022-03-10 10:25:10,469 P41332 INFO [Metrics] logloss: 0.523206 - AUC: 0.816502
-2022-03-10 10:25:10,470 P41332 INFO Save best model: monitor(max): 0.293295
-2022-03-10 10:25:10,517 P41332 INFO --- 591/591 batches finished ---
-2022-03-10 10:25:10,555 P41332 INFO Train loss: 0.503005
-2022-03-10 10:25:10,555 P41332 INFO ************ Epoch=14 end ************
-2022-03-10 10:25:43,610 P41332 INFO [Metrics] logloss: 0.522839 - AUC: 0.816770
-2022-03-10 10:25:43,611 P41332 INFO Save best model: monitor(max): 0.293931
-2022-03-10 10:25:43,666 P41332 INFO --- 591/591 batches finished ---
-2022-03-10 10:25:43,703 P41332 INFO Train loss: 0.502446
-2022-03-10 10:25:43,703 P41332 INFO ************ Epoch=15 end ************
-2022-03-10 10:26:16,525 P41332 INFO [Metrics] logloss: 0.522795 - AUC: 0.816967
-2022-03-10 10:26:16,525 P41332 INFO Save best model: monitor(max): 0.294172
-2022-03-10 10:26:16,581 P41332 INFO --- 591/591 batches finished ---
-2022-03-10 10:26:16,617 P41332 INFO Train loss: 0.501886
-2022-03-10 10:26:16,618 P41332 INFO ************ Epoch=16 end ************
-2022-03-10 10:26:51,200 P41332 INFO [Metrics] logloss: 0.522369 - AUC: 0.817295
-2022-03-10 10:26:51,201 P41332 INFO Save best model: monitor(max): 0.294926
-2022-03-10 10:26:51,263 P41332 INFO --- 591/591 batches finished ---
-2022-03-10 10:26:51,307 P41332 INFO Train loss: 0.501423
-2022-03-10 10:26:51,307 P41332 INFO ************ Epoch=17 end ************
-2022-03-10 10:27:25,630 P41332 INFO [Metrics] logloss: 0.522271 - AUC: 0.817492
-2022-03-10 10:27:25,630 P41332 INFO Save best model: monitor(max): 0.295221
-2022-03-10 10:27:25,692 P41332 INFO --- 591/591 batches finished ---
-2022-03-10 10:27:25,735 P41332 INFO Train loss: 0.501100
-2022-03-10 10:27:25,735 P41332 INFO ************ Epoch=18 end ************
-2022-03-10 10:28:01,813 P41332 INFO [Metrics] logloss: 0.522063 - AUC: 0.817497
-2022-03-10 10:28:01,814 P41332 INFO Save best model: monitor(max): 0.295435
-2022-03-10 10:28:01,873 P41332 INFO --- 591/591 batches finished ---
-2022-03-10 10:28:01,910 P41332 INFO Train loss: 0.500716
-2022-03-10 10:28:01,910 P41332 INFO ************ Epoch=19 end ************
-2022-03-10 10:28:35,032 P41332 INFO [Metrics] logloss: 0.521953 - AUC: 0.817633
-2022-03-10 10:28:35,033 P41332 INFO Save best model: monitor(max): 0.295680
-2022-03-10 10:28:35,087 P41332 INFO --- 591/591 batches finished ---
-2022-03-10 10:28:35,123 P41332 INFO Train loss: 0.500465
-2022-03-10 10:28:35,123 P41332 INFO ************ Epoch=20 end ************
-2022-03-10 10:29:10,412 P41332 INFO [Metrics] logloss: 0.521806 - AUC: 0.817835
-2022-03-10 10:29:10,412 P41332 INFO Save best model: monitor(max): 0.296030
-2022-03-10 10:29:10,474 P41332 INFO --- 591/591 batches finished ---
-2022-03-10 10:29:10,520 P41332 INFO Train loss: 0.500156
-2022-03-10 10:29:10,521 P41332 INFO ************ Epoch=21 end ************
-2022-03-10 10:29:46,792 P41332 INFO [Metrics] logloss: 0.522107 - AUC: 0.817399
-2022-03-10 10:29:46,793 P41332 INFO Monitor(max) STOP: 0.295292 !
-2022-03-10 10:29:46,793 P41332 INFO Reduce learning rate on plateau: 0.000100
-2022-03-10 10:29:46,793 P41332 INFO --- 591/591 batches finished ---
-2022-03-10 10:29:46,839 P41332 INFO Train loss: 0.499867
-2022-03-10 10:29:46,839 P41332 INFO ************ Epoch=22 end ************
-2022-03-10 10:30:21,586 P41332 INFO [Metrics] logloss: 0.520255 - AUC: 0.819091
-2022-03-10 10:30:21,587 P41332 INFO Save best model: monitor(max): 0.298836
-2022-03-10 10:30:21,638 P41332 INFO --- 591/591 batches finished ---
-2022-03-10 10:30:21,680 P41332 INFO Train loss: 0.487482
-2022-03-10 10:30:21,680 P41332 INFO ************ Epoch=23 end ************
-2022-03-10 10:30:55,717 P41332 INFO [Metrics] logloss: 0.519822 - AUC: 0.819423
-2022-03-10 10:30:55,717 P41332 INFO Save best model: monitor(max): 0.299601
-2022-03-10 10:30:55,775 P41332 INFO --- 591/591 batches finished ---
-2022-03-10 10:30:55,814 P41332 INFO Train loss: 0.486410
-2022-03-10 10:30:55,814 P41332 INFO ************ Epoch=24 end ************
-2022-03-10 10:31:30,278 P41332 INFO [Metrics] logloss: 0.519670 - AUC: 0.819569
-2022-03-10 10:31:30,279 P41332 INFO Save best model: monitor(max): 0.299899
-2022-03-10 10:31:30,341 P41332 INFO --- 591/591 batches finished ---
-2022-03-10 10:31:30,382 P41332 INFO Train loss: 0.485951
-2022-03-10 10:31:30,382 P41332 INFO ************ Epoch=25 end ************
-2022-03-10 10:32:04,384 P41332 INFO [Metrics] logloss: 0.519545 - AUC: 0.819667
-2022-03-10 10:32:04,384 P41332 INFO Save best model: monitor(max): 0.300122
-2022-03-10 10:32:04,441 P41332 INFO --- 591/591 batches finished ---
-2022-03-10 10:32:04,480 P41332 INFO Train loss: 0.485638
-2022-03-10 10:32:04,480 P41332 INFO ************ Epoch=26 end ************
-2022-03-10 10:32:38,721 P41332 INFO [Metrics] logloss: 0.519545 - AUC: 0.819653
-2022-03-10 10:32:38,722 P41332 INFO Monitor(max) STOP: 0.300108 !
-2022-03-10 10:32:38,722 P41332 INFO Reduce learning rate on plateau: 0.000010
-2022-03-10 10:32:38,722 P41332 INFO --- 591/591 batches finished ---
-2022-03-10 10:32:38,771 P41332 INFO Train loss: 0.485436
-2022-03-10 10:32:38,771 P41332 INFO ************ Epoch=27 end ************
-2022-03-10 10:33:14,798 P41332 INFO [Metrics] logloss: 0.519510 - AUC: 0.819676
-2022-03-10 10:33:14,799 P41332 INFO Save best model: monitor(max): 0.300166
-2022-03-10 10:33:14,869 P41332 INFO --- 591/591 batches finished ---
-2022-03-10 10:33:14,919 P41332 INFO Train loss: 0.483624
-2022-03-10 10:33:14,919 P41332 INFO ************ Epoch=28 end ************
-2022-03-10 10:33:47,928 P41332 INFO [Metrics] logloss: 0.519502 - AUC: 0.819681
-2022-03-10 10:33:47,929 P41332 INFO Save best model: monitor(max): 0.300179
-2022-03-10 10:33:47,985 P41332 INFO --- 591/591 batches finished ---
-2022-03-10 10:33:48,044 P41332 INFO Train loss: 0.483623
-2022-03-10 10:33:48,046 P41332 INFO ************ Epoch=29 end ************
-2022-03-10 10:34:20,613 P41332 INFO [Metrics] logloss: 0.519497 - AUC: 0.819686
-2022-03-10 10:34:20,614 P41332 INFO Save best model: monitor(max): 0.300189
-2022-03-10 10:34:20,671 P41332 INFO --- 591/591 batches finished ---
-2022-03-10 10:34:20,709 P41332 INFO Train loss: 0.483591
-2022-03-10 10:34:20,709 P41332 INFO ************ Epoch=30 end ************
-2022-03-10 10:34:54,042 P41332 INFO [Metrics] logloss: 0.519496 - AUC: 0.819684
-2022-03-10 10:34:54,043 P41332 INFO Monitor(max) STOP: 0.300188 !
-2022-03-10 10:34:54,043 P41332 INFO Reduce learning rate on plateau: 0.000001
-2022-03-10 10:34:54,043 P41332 INFO --- 591/591 batches finished ---
-2022-03-10 10:34:54,081 P41332 INFO Train loss: 0.483554
-2022-03-10 10:34:54,081 P41332 INFO ************ Epoch=31 end ************
-2022-03-10 10:35:28,417 P41332 INFO [Metrics] logloss: 0.519495 - AUC: 0.819684
-2022-03-10 10:35:28,418 P41332 INFO Monitor(max) STOP: 0.300189 !
-2022-03-10 10:35:28,418 P41332 INFO Reduce learning rate on plateau: 0.000001
-2022-03-10 10:35:28,419 P41332 INFO Early stopping at epoch=32
-2022-03-10 10:35:28,419 P41332 INFO --- 591/591 batches finished ---
-2022-03-10 10:35:28,460 P41332 INFO Train loss: 0.483399
-2022-03-10 10:35:28,460 P41332 INFO Training finished.
-2022-03-10 10:35:28,460 P41332 INFO Load best model: /cache/FuxiCTR/benchmarks/KKBox/LorentzFM_kkbox_x1/kkbox_x1_227d337d/LorentzFM_kkbox_x1_003_0455bdef_model.ckpt
-2022-03-10 10:35:28,533 P41332 INFO ****** Validation evaluation ******
-2022-03-10 10:35:32,906 P41332 INFO [Metrics] logloss: 0.519497 - AUC: 0.819686
-2022-03-10 10:35:32,970 P41332 INFO ******** Test evaluation ********
-2022-03-10 10:35:32,970 P41332 INFO Loading data...
-2022-03-10 10:35:32,970 P41332 INFO Loading data from h5: ../data/KKBox/kkbox_x1_227d337d/test.h5
-2022-03-10 10:35:33,040 P41332 INFO Test samples: total/737743, pos/371466, neg/366277, ratio/50.35%
-2022-03-10 10:35:33,040 P41332 INFO Loading test data done.
-2022-03-10 10:35:37,028 P41332 INFO [Metrics] logloss: 0.518848 - AUC: 0.820207
-
-```
+## LorentzFM_kkbox_x1
+
+A hands-on guide to run the LorentzFM model on the KKBox_x1 dataset.
+
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
+
+### Index
+[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
+
+### Environments
++ Hardware
+
+ ```python
+ CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
+ GPU: Tesla V100 32G
+ RAM: 755G
+
+ ```
+
++ Software
+
+ ```python
+ CUDA: 10.2
+ python: 3.6.4
+ pytorch: 1.0.0
+ pandas: 0.22.0
+ numpy: 1.19.2
+ scipy: 1.5.4
+ sklearn: 0.22.1
+ pyyaml: 5.4.1
+ h5py: 2.8.0
+ tqdm: 4.60.0
+ fuxictr: 1.0.2
+ ```
+
+### Dataset
+Dataset ID: [KKBox_x1](https://github.com/reczoo/Datasets/tree/main/KKBox/KKBox_x1). Please refer to the dataset details to get data ready.
+
+### Code
+
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [LorentzFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/LorentzFM.py).
+
+Running steps:
+
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+
+ ```python
+ sys.path.append('YOUR_PATH_TO_FuxiCTR/')
+ ```
+
+2. Create a data directory and put the downloaded csv files in `../data/KKBox/KKBox_x1`.
+
+3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [LorentzFM_kkbox_x1_tuner_config_02](./LorentzFM_kkbox_x1_tuner_config_02). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
+
+4. Run the following script to start.
+
+ ```bash
+ cd LorentzFM_kkbox_x1
+ nohup python run_expid.py --config ./LorentzFM_kkbox_x1_tuner_config_02 --expid LorentzFM_kkbox_x1_003_0455bdef --gpu 0 > run.log &
+ tail -f run.log
+ ```
+
+### Results
+
+| logloss | AUC |
+|:--------------------:|:--------------------:|
+| 0.518848 | 0.820207 |
+
+
+### Logs
+```python
+2022-03-10 10:17:07,540 P41332 INFO {
+ "batch_size": "10000",
+ "data_format": "csv",
+ "data_root": "../data/KKBox/",
+ "dataset_id": "kkbox_x1_227d337d",
+ "debug": "False",
+ "embedding_dim": "128",
+ "embedding_dropout": "0",
+ "epochs": "100",
+ "every_x_epochs": "1",
+ "feature_cols": "[{'active': True, 'dtype': 'str', 'name': ['msno', 'song_id', 'source_system_tab', 'source_screen_name', 'source_type', 'city', 'gender', 'registered_via', 'language'], 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'encoder': 'MaskedSumPooling', 'max_len': 3, 'name': 'genre_ids', 'type': 'sequence'}, {'active': True, 'dtype': 'str', 'encoder': 'MaskedSumPooling', 'max_len': 3, 'name': 'artist_name', 'type': 'sequence'}, {'active': True, 'dtype': 'str', 'name': 'isrc', 'preprocess': 'extract_country_code', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': 'bd', 'preprocess': 'bucketize_age', 'type': 'categorical'}]",
+ "gpu": "2",
+ "label_col": "{'dtype': 'float', 'name': 'label'}",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "metrics": "['logloss', 'AUC']",
+ "min_categr_count": "10",
+ "model": "LorentzFM",
+ "model_id": "LorentzFM_kkbox_x1_003_0455bdef",
+ "model_root": "./KKBox/LorentzFM_kkbox_x1/",
+ "monitor": "{'AUC': 1, 'logloss': -1}",
+ "monitor_mode": "max",
+ "optimizer": "adam",
+ "patience": "2",
+ "pickle_feature_encoder": "True",
+ "regularizer": "1e-06",
+ "save_best_only": "True",
+ "seed": "2019",
+ "shuffle": "True",
+ "task": "binary_classification",
+ "test_data": "../data/KKBox/KKBox_x1/test.csv",
+ "train_data": "../data/KKBox/KKBox_x1/train.csv",
+ "use_hdf5": "True",
+ "valid_data": "../data/KKBox/KKBox_x1/valid.csv",
+ "verbose": "1",
+ "version": "pytorch",
+ "workers": "3"
+}
+2022-03-10 10:17:07,541 P41332 INFO Set up feature encoder...
+2022-03-10 10:17:07,542 P41332 INFO Load feature_encoder from pickle: ../data/KKBox/kkbox_x1_227d337d/feature_encoder.pkl
+2022-03-10 10:17:08,469 P41332 INFO Total number of parameters: 11807616.
+2022-03-10 10:17:08,469 P41332 INFO Loading data...
+2022-03-10 10:17:08,470 P41332 INFO Loading data from h5: ../data/KKBox/kkbox_x1_227d337d/train.h5
+2022-03-10 10:17:08,832 P41332 INFO Loading data from h5: ../data/KKBox/kkbox_x1_227d337d/valid.h5
+2022-03-10 10:17:09,051 P41332 INFO Train samples: total/5901932, pos/2971724, neg/2930208, ratio/50.35%
+2022-03-10 10:17:09,073 P41332 INFO Validation samples: total/737743, pos/371466, neg/366277, ratio/50.35%
+2022-03-10 10:17:09,073 P41332 INFO Loading train data done.
+2022-03-10 10:17:13,055 P41332 INFO Start training: 591 batches/epoch
+2022-03-10 10:17:13,056 P41332 INFO ************ Epoch=1 start ************
+2022-03-10 10:17:47,611 P41332 INFO [Metrics] logloss: 0.561536 - AUC: 0.781421
+2022-03-10 10:17:47,616 P41332 INFO Save best model: monitor(max): 0.219885
+2022-03-10 10:17:47,942 P41332 INFO --- 591/591 batches finished ---
+2022-03-10 10:17:47,981 P41332 INFO Train loss: 0.589643
+2022-03-10 10:17:47,981 P41332 INFO ************ Epoch=1 end ************
+2022-03-10 10:18:22,008 P41332 INFO [Metrics] logloss: 0.546223 - AUC: 0.795819
+2022-03-10 10:18:22,009 P41332 INFO Save best model: monitor(max): 0.249596
+2022-03-10 10:18:22,066 P41332 INFO --- 591/591 batches finished ---
+2022-03-10 10:18:22,108 P41332 INFO Train loss: 0.550875
+2022-03-10 10:18:22,108 P41332 INFO ************ Epoch=2 end ************
+2022-03-10 10:18:56,662 P41332 INFO [Metrics] logloss: 0.538973 - AUC: 0.802626
+2022-03-10 10:18:56,663 P41332 INFO Save best model: monitor(max): 0.263653
+2022-03-10 10:18:56,730 P41332 INFO --- 591/591 batches finished ---
+2022-03-10 10:18:56,772 P41332 INFO Train loss: 0.536165
+2022-03-10 10:18:56,772 P41332 INFO ************ Epoch=3 end ************
+2022-03-10 10:19:31,345 P41332 INFO [Metrics] logloss: 0.534488 - AUC: 0.806673
+2022-03-10 10:19:31,346 P41332 INFO Save best model: monitor(max): 0.272185
+2022-03-10 10:19:31,406 P41332 INFO --- 591/591 batches finished ---
+2022-03-10 10:19:31,447 P41332 INFO Train loss: 0.526991
+2022-03-10 10:19:31,447 P41332 INFO ************ Epoch=4 end ************
+2022-03-10 10:20:05,741 P41332 INFO [Metrics] logloss: 0.531310 - AUC: 0.809495
+2022-03-10 10:20:05,742 P41332 INFO Save best model: monitor(max): 0.278185
+2022-03-10 10:20:05,794 P41332 INFO --- 591/591 batches finished ---
+2022-03-10 10:20:05,847 P41332 INFO Train loss: 0.520549
+2022-03-10 10:20:05,847 P41332 INFO ************ Epoch=5 end ************
+2022-03-10 10:20:41,958 P41332 INFO [Metrics] logloss: 0.529257 - AUC: 0.811305
+2022-03-10 10:20:41,959 P41332 INFO Save best model: monitor(max): 0.282048
+2022-03-10 10:20:42,018 P41332 INFO --- 591/591 batches finished ---
+2022-03-10 10:20:42,064 P41332 INFO Train loss: 0.516020
+2022-03-10 10:20:42,064 P41332 INFO ************ Epoch=6 end ************
+2022-03-10 10:21:16,199 P41332 INFO [Metrics] logloss: 0.527650 - AUC: 0.812612
+2022-03-10 10:21:16,199 P41332 INFO Save best model: monitor(max): 0.284963
+2022-03-10 10:21:16,259 P41332 INFO --- 591/591 batches finished ---
+2022-03-10 10:21:16,297 P41332 INFO Train loss: 0.512699
+2022-03-10 10:21:16,297 P41332 INFO ************ Epoch=7 end ************
+2022-03-10 10:21:50,137 P41332 INFO [Metrics] logloss: 0.526577 - AUC: 0.813520
+2022-03-10 10:21:50,137 P41332 INFO Save best model: monitor(max): 0.286943
+2022-03-10 10:21:50,201 P41332 INFO --- 591/591 batches finished ---
+2022-03-10 10:21:50,238 P41332 INFO Train loss: 0.510222
+2022-03-10 10:21:50,238 P41332 INFO ************ Epoch=8 end ************
+2022-03-10 10:22:23,579 P41332 INFO [Metrics] logloss: 0.525591 - AUC: 0.814428
+2022-03-10 10:22:23,580 P41332 INFO Save best model: monitor(max): 0.288836
+2022-03-10 10:22:23,639 P41332 INFO --- 591/591 batches finished ---
+2022-03-10 10:22:23,675 P41332 INFO Train loss: 0.508269
+2022-03-10 10:22:23,675 P41332 INFO ************ Epoch=9 end ************
+2022-03-10 10:22:56,807 P41332 INFO [Metrics] logloss: 0.524992 - AUC: 0.814898
+2022-03-10 10:22:56,808 P41332 INFO Save best model: monitor(max): 0.289906
+2022-03-10 10:22:56,868 P41332 INFO --- 591/591 batches finished ---
+2022-03-10 10:22:56,905 P41332 INFO Train loss: 0.506742
+2022-03-10 10:22:56,905 P41332 INFO ************ Epoch=10 end ************
+2022-03-10 10:23:31,662 P41332 INFO [Metrics] logloss: 0.524255 - AUC: 0.815599
+2022-03-10 10:23:31,663 P41332 INFO Save best model: monitor(max): 0.291344
+2022-03-10 10:23:31,733 P41332 INFO --- 591/591 batches finished ---
+2022-03-10 10:23:31,780 P41332 INFO Train loss: 0.505529
+2022-03-10 10:23:31,781 P41332 INFO ************ Epoch=11 end ************
+2022-03-10 10:24:04,648 P41332 INFO [Metrics] logloss: 0.523971 - AUC: 0.815822
+2022-03-10 10:24:04,649 P41332 INFO Save best model: monitor(max): 0.291851
+2022-03-10 10:24:04,707 P41332 INFO --- 591/591 batches finished ---
+2022-03-10 10:24:04,744 P41332 INFO Train loss: 0.504482
+2022-03-10 10:24:04,745 P41332 INFO ************ Epoch=12 end ************
+2022-03-10 10:24:37,700 P41332 INFO [Metrics] logloss: 0.523502 - AUC: 0.816232
+2022-03-10 10:24:37,700 P41332 INFO Save best model: monitor(max): 0.292730
+2022-03-10 10:24:37,756 P41332 INFO --- 591/591 batches finished ---
+2022-03-10 10:24:37,793 P41332 INFO Train loss: 0.503722
+2022-03-10 10:24:37,793 P41332 INFO ************ Epoch=13 end ************
+2022-03-10 10:25:10,469 P41332 INFO [Metrics] logloss: 0.523206 - AUC: 0.816502
+2022-03-10 10:25:10,470 P41332 INFO Save best model: monitor(max): 0.293295
+2022-03-10 10:25:10,517 P41332 INFO --- 591/591 batches finished ---
+2022-03-10 10:25:10,555 P41332 INFO Train loss: 0.503005
+2022-03-10 10:25:10,555 P41332 INFO ************ Epoch=14 end ************
+2022-03-10 10:25:43,610 P41332 INFO [Metrics] logloss: 0.522839 - AUC: 0.816770
+2022-03-10 10:25:43,611 P41332 INFO Save best model: monitor(max): 0.293931
+2022-03-10 10:25:43,666 P41332 INFO --- 591/591 batches finished ---
+2022-03-10 10:25:43,703 P41332 INFO Train loss: 0.502446
+2022-03-10 10:25:43,703 P41332 INFO ************ Epoch=15 end ************
+2022-03-10 10:26:16,525 P41332 INFO [Metrics] logloss: 0.522795 - AUC: 0.816967
+2022-03-10 10:26:16,525 P41332 INFO Save best model: monitor(max): 0.294172
+2022-03-10 10:26:16,581 P41332 INFO --- 591/591 batches finished ---
+2022-03-10 10:26:16,617 P41332 INFO Train loss: 0.501886
+2022-03-10 10:26:16,618 P41332 INFO ************ Epoch=16 end ************
+2022-03-10 10:26:51,200 P41332 INFO [Metrics] logloss: 0.522369 - AUC: 0.817295
+2022-03-10 10:26:51,201 P41332 INFO Save best model: monitor(max): 0.294926
+2022-03-10 10:26:51,263 P41332 INFO --- 591/591 batches finished ---
+2022-03-10 10:26:51,307 P41332 INFO Train loss: 0.501423
+2022-03-10 10:26:51,307 P41332 INFO ************ Epoch=17 end ************
+2022-03-10 10:27:25,630 P41332 INFO [Metrics] logloss: 0.522271 - AUC: 0.817492
+2022-03-10 10:27:25,630 P41332 INFO Save best model: monitor(max): 0.295221
+2022-03-10 10:27:25,692 P41332 INFO --- 591/591 batches finished ---
+2022-03-10 10:27:25,735 P41332 INFO Train loss: 0.501100
+2022-03-10 10:27:25,735 P41332 INFO ************ Epoch=18 end ************
+2022-03-10 10:28:01,813 P41332 INFO [Metrics] logloss: 0.522063 - AUC: 0.817497
+2022-03-10 10:28:01,814 P41332 INFO Save best model: monitor(max): 0.295435
+2022-03-10 10:28:01,873 P41332 INFO --- 591/591 batches finished ---
+2022-03-10 10:28:01,910 P41332 INFO Train loss: 0.500716
+2022-03-10 10:28:01,910 P41332 INFO ************ Epoch=19 end ************
+2022-03-10 10:28:35,032 P41332 INFO [Metrics] logloss: 0.521953 - AUC: 0.817633
+2022-03-10 10:28:35,033 P41332 INFO Save best model: monitor(max): 0.295680
+2022-03-10 10:28:35,087 P41332 INFO --- 591/591 batches finished ---
+2022-03-10 10:28:35,123 P41332 INFO Train loss: 0.500465
+2022-03-10 10:28:35,123 P41332 INFO ************ Epoch=20 end ************
+2022-03-10 10:29:10,412 P41332 INFO [Metrics] logloss: 0.521806 - AUC: 0.817835
+2022-03-10 10:29:10,412 P41332 INFO Save best model: monitor(max): 0.296030
+2022-03-10 10:29:10,474 P41332 INFO --- 591/591 batches finished ---
+2022-03-10 10:29:10,520 P41332 INFO Train loss: 0.500156
+2022-03-10 10:29:10,521 P41332 INFO ************ Epoch=21 end ************
+2022-03-10 10:29:46,792 P41332 INFO [Metrics] logloss: 0.522107 - AUC: 0.817399
+2022-03-10 10:29:46,793 P41332 INFO Monitor(max) STOP: 0.295292 !
+2022-03-10 10:29:46,793 P41332 INFO Reduce learning rate on plateau: 0.000100
+2022-03-10 10:29:46,793 P41332 INFO --- 591/591 batches finished ---
+2022-03-10 10:29:46,839 P41332 INFO Train loss: 0.499867
+2022-03-10 10:29:46,839 P41332 INFO ************ Epoch=22 end ************
+2022-03-10 10:30:21,586 P41332 INFO [Metrics] logloss: 0.520255 - AUC: 0.819091
+2022-03-10 10:30:21,587 P41332 INFO Save best model: monitor(max): 0.298836
+2022-03-10 10:30:21,638 P41332 INFO --- 591/591 batches finished ---
+2022-03-10 10:30:21,680 P41332 INFO Train loss: 0.487482
+2022-03-10 10:30:21,680 P41332 INFO ************ Epoch=23 end ************
+2022-03-10 10:30:55,717 P41332 INFO [Metrics] logloss: 0.519822 - AUC: 0.819423
+2022-03-10 10:30:55,717 P41332 INFO Save best model: monitor(max): 0.299601
+2022-03-10 10:30:55,775 P41332 INFO --- 591/591 batches finished ---
+2022-03-10 10:30:55,814 P41332 INFO Train loss: 0.486410
+2022-03-10 10:30:55,814 P41332 INFO ************ Epoch=24 end ************
+2022-03-10 10:31:30,278 P41332 INFO [Metrics] logloss: 0.519670 - AUC: 0.819569
+2022-03-10 10:31:30,279 P41332 INFO Save best model: monitor(max): 0.299899
+2022-03-10 10:31:30,341 P41332 INFO --- 591/591 batches finished ---
+2022-03-10 10:31:30,382 P41332 INFO Train loss: 0.485951
+2022-03-10 10:31:30,382 P41332 INFO ************ Epoch=25 end ************
+2022-03-10 10:32:04,384 P41332 INFO [Metrics] logloss: 0.519545 - AUC: 0.819667
+2022-03-10 10:32:04,384 P41332 INFO Save best model: monitor(max): 0.300122
+2022-03-10 10:32:04,441 P41332 INFO --- 591/591 batches finished ---
+2022-03-10 10:32:04,480 P41332 INFO Train loss: 0.485638
+2022-03-10 10:32:04,480 P41332 INFO ************ Epoch=26 end ************
+2022-03-10 10:32:38,721 P41332 INFO [Metrics] logloss: 0.519545 - AUC: 0.819653
+2022-03-10 10:32:38,722 P41332 INFO Monitor(max) STOP: 0.300108 !
+2022-03-10 10:32:38,722 P41332 INFO Reduce learning rate on plateau: 0.000010
+2022-03-10 10:32:38,722 P41332 INFO --- 591/591 batches finished ---
+2022-03-10 10:32:38,771 P41332 INFO Train loss: 0.485436
+2022-03-10 10:32:38,771 P41332 INFO ************ Epoch=27 end ************
+2022-03-10 10:33:14,798 P41332 INFO [Metrics] logloss: 0.519510 - AUC: 0.819676
+2022-03-10 10:33:14,799 P41332 INFO Save best model: monitor(max): 0.300166
+2022-03-10 10:33:14,869 P41332 INFO --- 591/591 batches finished ---
+2022-03-10 10:33:14,919 P41332 INFO Train loss: 0.483624
+2022-03-10 10:33:14,919 P41332 INFO ************ Epoch=28 end ************
+2022-03-10 10:33:47,928 P41332 INFO [Metrics] logloss: 0.519502 - AUC: 0.819681
+2022-03-10 10:33:47,929 P41332 INFO Save best model: monitor(max): 0.300179
+2022-03-10 10:33:47,985 P41332 INFO --- 591/591 batches finished ---
+2022-03-10 10:33:48,044 P41332 INFO Train loss: 0.483623
+2022-03-10 10:33:48,046 P41332 INFO ************ Epoch=29 end ************
+2022-03-10 10:34:20,613 P41332 INFO [Metrics] logloss: 0.519497 - AUC: 0.819686
+2022-03-10 10:34:20,614 P41332 INFO Save best model: monitor(max): 0.300189
+2022-03-10 10:34:20,671 P41332 INFO --- 591/591 batches finished ---
+2022-03-10 10:34:20,709 P41332 INFO Train loss: 0.483591
+2022-03-10 10:34:20,709 P41332 INFO ************ Epoch=30 end ************
+2022-03-10 10:34:54,042 P41332 INFO [Metrics] logloss: 0.519496 - AUC: 0.819684
+2022-03-10 10:34:54,043 P41332 INFO Monitor(max) STOP: 0.300188 !
+2022-03-10 10:34:54,043 P41332 INFO Reduce learning rate on plateau: 0.000001
+2022-03-10 10:34:54,043 P41332 INFO --- 591/591 batches finished ---
+2022-03-10 10:34:54,081 P41332 INFO Train loss: 0.483554
+2022-03-10 10:34:54,081 P41332 INFO ************ Epoch=31 end ************
+2022-03-10 10:35:28,417 P41332 INFO [Metrics] logloss: 0.519495 - AUC: 0.819684
+2022-03-10 10:35:28,418 P41332 INFO Monitor(max) STOP: 0.300189 !
+2022-03-10 10:35:28,418 P41332 INFO Reduce learning rate on plateau: 0.000001
+2022-03-10 10:35:28,419 P41332 INFO Early stopping at epoch=32
+2022-03-10 10:35:28,419 P41332 INFO --- 591/591 batches finished ---
+2022-03-10 10:35:28,460 P41332 INFO Train loss: 0.483399
+2022-03-10 10:35:28,460 P41332 INFO Training finished.
+2022-03-10 10:35:28,460 P41332 INFO Load best model: /cache/FuxiCTR/benchmarks/KKBox/LorentzFM_kkbox_x1/kkbox_x1_227d337d/LorentzFM_kkbox_x1_003_0455bdef_model.ckpt
+2022-03-10 10:35:28,533 P41332 INFO ****** Validation evaluation ******
+2022-03-10 10:35:32,906 P41332 INFO [Metrics] logloss: 0.519497 - AUC: 0.819686
+2022-03-10 10:35:32,970 P41332 INFO ******** Test evaluation ********
+2022-03-10 10:35:32,970 P41332 INFO Loading data...
+2022-03-10 10:35:32,970 P41332 INFO Loading data from h5: ../data/KKBox/kkbox_x1_227d337d/test.h5
+2022-03-10 10:35:33,040 P41332 INFO Test samples: total/737743, pos/371466, neg/366277, ratio/50.35%
+2022-03-10 10:35:33,040 P41332 INFO Loading test data done.
+2022-03-10 10:35:37,028 P41332 INFO [Metrics] logloss: 0.518848 - AUC: 0.820207
+
+```
diff --git a/ranking/ctr/MaskNet/MaskNet_avazu_x1/README.md b/ranking/ctr/MaskNet/MaskNet_avazu_x1/README.md
index 098d602b..a9b773b8 100644
--- a/ranking/ctr/MaskNet/MaskNet_avazu_x1/README.md
+++ b/ranking/ctr/MaskNet/MaskNet_avazu_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the MaskNet model on the Avazu_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -39,11 +39,11 @@ Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pre
### Code
-We use [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [MaskNet](https://github.com/xue-pai/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/MaskNet.py).
+We use [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [MaskNet](https://github.com/reczoo/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/MaskNet.py).
Running steps:
-1. Download [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/MaskNet/MaskNet_avazu_x4_001/MaskNet_avazu_x4_001_019_541571c0.log b/ranking/ctr/MaskNet/MaskNet_avazu_x4_001/MaskNet_avazu_x4_001_019_541571c0.log
new file mode 100644
index 00000000..33761d8d
--- /dev/null
+++ b/ranking/ctr/MaskNet/MaskNet_avazu_x4_001/MaskNet_avazu_x4_001_019_541571c0.log
@@ -0,0 +1,106 @@
+2024-02-22 20:06:23,961 P316688 INFO Params: {
+ "batch_size": "10000",
+ "data_format": "csv",
+ "data_root": "../data/Avazu/",
+ "dataset_id": "avazu_x4_001_a31210da",
+ "debug_mode": "False",
+ "dnn_hidden_activations": "relu",
+ "dnn_hidden_units": "[2000, 2000, 2000]",
+ "early_stop_patience": "1",
+ "emb_layernorm": "False",
+ "embedding_dim": "16",
+ "embedding_regularizer": "1e-09",
+ "epochs": "100",
+ "eval_steps": "None",
+ "feature_cols": "[{'active': False, 'dtype': 'str', 'name': 'id', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': 'hour', 'preprocess': 'convert_hour', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': ['C1', 'banner_pos', 'site_id', 'site_domain', 'site_category', 'app_id', 'app_domain', 'app_category', 'device_id', 'device_ip', 'device_model', 'device_type', 'device_conn_type', 'C14', 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21'], 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': 'weekday', 'preprocess': 'convert_weekday', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': 'weekend', 'preprocess': 'convert_weekend', 'type': 'categorical'}]",
+ "feature_config": "None",
+ "feature_specs": "None",
+ "gpu": "1",
+ "group_id": "None",
+ "label_col": "{'dtype': 'float', 'name': 'click'}",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "metrics": "['AUC', 'logloss']",
+ "min_categr_count": "2",
+ "model": "MaskNet",
+ "model_id": "MaskNet_avazu_x4_001_019_541571c0",
+ "model_root": "./checkpoints/",
+ "model_type": "ParallelMaskNet",
+ "monitor": "{'AUC': 1, 'logloss': -1}",
+ "monitor_mode": "max",
+ "net_dropout": "0",
+ "net_layernorm": "False",
+ "net_regularizer": "0",
+ "num_workers": "3",
+ "optimizer": "adam",
+ "parallel_block_dim": "500",
+ "parallel_num_blocks": "6",
+ "pickle_feature_encoder": "True",
+ "reduction_ratio": "0.8",
+ "save_best_only": "True",
+ "seed": "2019",
+ "shuffle": "True",
+ "task": "binary_classification",
+ "test_data": "../data/Avazu/Avazu_x4/test.csv",
+ "train_data": "../data/Avazu/Avazu_x4/train.csv",
+ "use_features": "None",
+ "valid_data": "../data/Avazu/Avazu_x4/valid.csv",
+ "verbose": "1"
+}
+2024-02-22 20:06:23,962 P316688 INFO Set up feature processor...
+2024-02-22 20:06:23,963 P316688 WARNING Skip rebuilding ../data/Avazu/avazu_x4_001_a31210da/feature_map.json. Please delete it manually if rebuilding is required.
+2024-02-22 20:06:23,963 P316688 INFO Load feature_map from json: ../data/Avazu/avazu_x4_001_a31210da/feature_map.json
+2024-02-22 20:06:23,963 P316688 INFO Set column index...
+2024-02-22 20:06:23,963 P316688 INFO Feature specs: {
+ "C1": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 9}",
+ "C14": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 2556}",
+ "C15": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 10}",
+ "C16": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 11}",
+ "C17": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 434}",
+ "C18": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 6}",
+ "C19": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 70}",
+ "C20": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 173}",
+ "C21": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 62}",
+ "app_category": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 33}",
+ "app_domain": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 400}",
+ "app_id": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 6545}",
+ "banner_pos": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 9}",
+ "device_conn_type": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 6}",
+ "device_id": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 820509}",
+ "device_ip": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 2903322}",
+ "device_model": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 7259}",
+ "device_type": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 7}",
+ "hour": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 26}",
+ "site_category": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 27}",
+ "site_domain": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 5461}",
+ "site_id": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 4051}",
+ "weekday": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 9}",
+ "weekend": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 4}"
+}
+2024-02-22 20:06:28,906 P316688 INFO Total number of parameters: 76594787.
+2024-02-22 20:06:28,907 P316688 INFO Loading datasets...
+2024-02-22 20:06:49,843 P316688 INFO Train samples: total/32343172, blocks/1
+2024-02-22 20:06:52,314 P316688 INFO Validation samples: total/4042897, blocks/1
+2024-02-22 20:06:52,314 P316688 INFO Loading train and validation data done.
+2024-02-22 20:06:52,314 P316688 INFO Start training: 3235 batches/epoch
+2024-02-22 20:06:52,314 P316688 INFO ************ Epoch=1 start ************
+2024-02-22 20:13:41,048 P316688 INFO Train loss: 0.379669
+2024-02-22 20:13:41,048 P316688 INFO Evaluation @epoch 1 - batch 3235:
+2024-02-22 20:14:00,960 P316688 INFO [Metrics] AUC: 0.794253 - logloss: 0.371254
+2024-02-22 20:14:00,963 P316688 INFO Save best model: monitor(max)=0.423000
+2024-02-22 20:14:01,495 P316688 INFO ************ Epoch=1 end ************
+2024-02-22 20:20:51,541 P316688 INFO Train loss: 0.329410
+2024-02-22 20:20:51,542 P316688 INFO Evaluation @epoch 2 - batch 3235:
+2024-02-22 20:21:11,342 P316688 INFO [Metrics] AUC: 0.788830 - logloss: 0.379190
+2024-02-22 20:21:11,346 P316688 INFO Monitor(max)=0.409639 STOP!
+2024-02-22 20:21:11,346 P316688 INFO Reduce learning rate on plateau: 0.000100
+2024-02-22 20:21:11,346 P316688 INFO ********* Epoch==2 early stop *********
+2024-02-22 20:21:11,397 P316688 INFO Training finished.
+2024-02-22 20:21:11,397 P316688 INFO Load best model: /cache/FuxiCTR/benchmark/checkpoints/avazu_x4_001_a31210da/MaskNet_avazu_x4_001_019_541571c0.model
+2024-02-22 20:21:11,641 P316688 INFO ****** Validation evaluation ******
+2024-02-22 20:21:31,510 P316688 INFO [Metrics] AUC: 0.794253 - logloss: 0.371254
+2024-02-22 20:21:31,627 P316688 INFO ******** Test evaluation ********
+2024-02-22 20:21:31,627 P316688 INFO Loading datasets...
+2024-02-22 20:21:34,114 P316688 INFO Test samples: total/4042898, blocks/1
+2024-02-22 20:21:34,114 P316688 INFO Loading test data done.
+2024-02-22 20:21:53,769 P316688 INFO [Metrics] AUC: 0.794382 - logloss: 0.371189
diff --git a/ranking/ctr/MaskNet/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03.csv b/ranking/ctr/MaskNet/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03.csv
new file mode 100644
index 00000000..a9b1c6b3
--- /dev/null
+++ b/ranking/ctr/MaskNet/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03.csv
@@ -0,0 +1,36 @@
+ 20240222-202153,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_019_541571c0 --gpu 1,[exp_id] MaskNet_avazu_x4_001_019_541571c0,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.794253 - logloss: 0.371254,[test] AUC: 0.794382 - logloss: 0.371189
+ 20240222-195437,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_001_bbac6c11 --gpu 0,[exp_id] MaskNet_avazu_x4_001_001_bbac6c11,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.794150 - logloss: 0.371311,[test] AUC: 0.794240 - logloss: 0.371264
+ 20240222-200940,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_016_50685b2e --gpu 0,[exp_id] MaskNet_avazu_x4_001_016_50685b2e,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793962 - logloss: 0.371618,[test] AUC: 0.794158 - logloss: 0.371490
+ 20240222-204812,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_034_6960833c --gpu 6,[exp_id] MaskNet_avazu_x4_001_034_6960833c,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.794016 - logloss: 0.371616,[test] AUC: 0.794154 - logloss: 0.371523
+ 20240222-204020,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_029_20693171 --gpu 2,[exp_id] MaskNet_avazu_x4_001_029_20693171,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.794018 - logloss: 0.371544,[test] AUC: 0.794146 - logloss: 0.371443
+ 20240222-200935,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_011_a867ff31 --gpu 3,[exp_id] MaskNet_avazu_x4_001_011_a867ff31,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793882 - logloss: 0.371669,[test] AUC: 0.794082 - logloss: 0.371531
+ 20240222-204851,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_035_5dda1c36 --gpu 0,[exp_id] MaskNet_avazu_x4_001_035_5dda1c36,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793837 - logloss: 0.371590,[test] AUC: 0.794034 - logloss: 0.371450
+ 20240222-202142,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_022_f6170749 --gpu 4,[exp_id] MaskNet_avazu_x4_001_022_f6170749,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793759 - logloss: 0.371525,[test] AUC: 0.794026 - logloss: 0.371305
+ 20240222-204111,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_028_7b840e59 --gpu 3,[exp_id] MaskNet_avazu_x4_001_028_7b840e59,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793754 - logloss: 0.371663,[test] AUC: 0.794021 - logloss: 0.371438
+ 20240222-201943,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_018_1738e59f --gpu 6,[exp_id] MaskNet_avazu_x4_001_018_1738e59f,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793839 - logloss: 0.371828,[test] AUC: 0.794007 - logloss: 0.371701
+ 20240222-205022,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_036_66863184 --gpu 1,[exp_id] MaskNet_avazu_x4_001_036_66863184,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793800 - logloss: 0.371824,[test] AUC: 0.794001 - logloss: 0.371687
+ 20240222-201639,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_017_97d6e691 --gpu 5,[exp_id] MaskNet_avazu_x4_001_017_97d6e691,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793755 - logloss: 0.371646,[test] AUC: 0.793956 - logloss: 0.371512
+ 20240222-195115,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_004_4c10b76a --gpu 3,[exp_id] MaskNet_avazu_x4_001_004_4c10b76a,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793693 - logloss: 0.371509,[test] AUC: 0.793947 - logloss: 0.371314
+ 20240222-203601,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_031_46003868 --gpu 1,[exp_id] MaskNet_avazu_x4_001_031_46003868,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793689 - logloss: 0.371473,[test] AUC: 0.793895 - logloss: 0.371334
+ 20240222-204000,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_030_63a9ed56 --gpu 4,[exp_id] MaskNet_avazu_x4_001_030_63a9ed56,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793627 - logloss: 0.371633,[test] AUC: 0.793890 - logloss: 0.371488
+ 20240222-203356,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_027_16be52ea --gpu 0,[exp_id] MaskNet_avazu_x4_001_027_16be52ea,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793742 - logloss: 0.371847,[test] AUC: 0.793885 - logloss: 0.371763
+ 20240222-200924,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_010_c00b0af8 --gpu 4,[exp_id] MaskNet_avazu_x4_001_010_c00b0af8,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793656 - logloss: 0.371753,[test] AUC: 0.793863 - logloss: 0.371583
+ 20240222-200918,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_012_7d3eddf1 --gpu 7,[exp_id] MaskNet_avazu_x4_001_012_7d3eddf1,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793687 - logloss: 0.371615,[test] AUC: 0.793847 - logloss: 0.371510
+ 20240222-200544,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_013_c06e0e42 --gpu 6,[exp_id] MaskNet_avazu_x4_001_013_c06e0e42,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793611 - logloss: 0.371538,[test] AUC: 0.793837 - logloss: 0.371379
+ 20240222-203239,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_026_eff99881 --gpu 6,[exp_id] MaskNet_avazu_x4_001_026_eff99881,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793652 - logloss: 0.371576,[test] AUC: 0.793818 - logloss: 0.371478
+ 20240222-200229,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_009_077a69f8 --gpu 5,[exp_id] MaskNet_avazu_x4_001_009_077a69f8,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793625 - logloss: 0.371850,[test] AUC: 0.793777 - logloss: 0.371749
+ 20240222-202132,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_020_2b7f0a68 --gpu 2,[exp_id] MaskNet_avazu_x4_001_020_2b7f0a68,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793475 - logloss: 0.372081,[test] AUC: 0.793768 - logloss: 0.371918
+ 20240222-195401,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_002_9ef5839b --gpu 1,[exp_id] MaskNet_avazu_x4_001_002_9ef5839b,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793538 - logloss: 0.372006,[test] AUC: 0.793761 - logloss: 0.371889
+ 20240222-195339,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_003_76be1274 --gpu 2,[exp_id] MaskNet_avazu_x4_001_003_76be1274,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793649 - logloss: 0.371560,[test] AUC: 0.793760 - logloss: 0.371501
+ 20240222-195046,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_005_78175568 --gpu 4,[exp_id] MaskNet_avazu_x4_001_005_78175568,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793567 - logloss: 0.371969,[test] AUC: 0.793752 - logloss: 0.371831
+ 20240222-195139,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_008_776519e7 --gpu 7,[exp_id] MaskNet_avazu_x4_001_008_776519e7,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793541 - logloss: 0.371621,[test] AUC: 0.793751 - logloss: 0.371514
+ 20240222-195209,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_007_4dcce194 --gpu 6,[exp_id] MaskNet_avazu_x4_001_007_4dcce194,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793613 - logloss: 0.372054,[test] AUC: 0.793738 - logloss: 0.371967
+ 20240222-202130,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_023_b4bd40ab --gpu 3,[exp_id] MaskNet_avazu_x4_001_023_b4bd40ab,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793562 - logloss: 0.372052,[test] AUC: 0.793716 - logloss: 0.371938
+ 20240222-202416,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_021_ba3b920a --gpu 7,[exp_id] MaskNet_avazu_x4_001_021_ba3b920a,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793561 - logloss: 0.371632,[test] AUC: 0.793707 - logloss: 0.371564
+ 20240222-200623,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_014_ca388c2c --gpu 2,[exp_id] MaskNet_avazu_x4_001_014_ca388c2c,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793613 - logloss: 0.371877,[test] AUC: 0.793702 - logloss: 0.371817
+ 20240222-203732,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_032_404cab49 --gpu 7,[exp_id] MaskNet_avazu_x4_001_032_404cab49,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793640 - logloss: 0.371784,[test] AUC: 0.793687 - logloss: 0.371762
+ 20240222-202953,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_025_5ca67f2d --gpu 5,[exp_id] MaskNet_avazu_x4_001_025_5ca67f2d,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793551 - logloss: 0.372147,[test] AUC: 0.793660 - logloss: 0.372054
+ 20240222-202121,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_024_1028ff87 --gpu 0,[exp_id] MaskNet_avazu_x4_001_024_1028ff87,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793298 - logloss: 0.372440,[test] AUC: 0.793473 - logloss: 0.372317
+ 20240222-195032,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_006_32009e4b --gpu 5,[exp_id] MaskNet_avazu_x4_001_006_32009e4b,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793248 - logloss: 0.372228,[test] AUC: 0.793412 - logloss: 0.372118
+ 20240222-200619,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_015_efdd867c --gpu 1,[exp_id] MaskNet_avazu_x4_001_015_efdd867c,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793202 - logloss: 0.371822,[test] AUC: 0.793407 - logloss: 0.371724
+ 20240222-204300,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_033_6fff8ecb --gpu 5,[exp_id] MaskNet_avazu_x4_001_033_6fff8ecb,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.793193 - logloss: 0.371863,[test] AUC: 0.793359 - logloss: 0.371782
diff --git a/ranking/ctr/MaskNet/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03.yaml b/ranking/ctr/MaskNet/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03.yaml
new file mode 100644
index 00000000..1fefa78b
--- /dev/null
+++ b/ranking/ctr/MaskNet/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03.yaml
@@ -0,0 +1,42 @@
+base_config: ../model_zoo/MaskNet/config/
+base_expid: MaskNet_default
+dataset_id: avazu_x4_001
+
+dataset_config:
+ avazu_x4_001:
+ data_root: ../data/Avazu/
+ data_format: csv
+ train_data: ../data/Avazu/Avazu_x4/train.csv
+ valid_data: ../data/Avazu/Avazu_x4/valid.csv
+ test_data: ../data/Avazu/Avazu_x4/test.csv
+ min_categr_count: 2
+ feature_cols:
+ - {name: id, active: False, dtype: str, type: categorical}
+ - {name: hour, active: True, dtype: str, type: categorical, preprocess: convert_hour}
+ - {name: [C1,banner_pos,site_id,site_domain,site_category,app_id,app_domain,app_category,device_id,
+ device_ip,device_model,device_type,device_conn_type,C14,C15,C16,C17,C18,C19,C20,C21],
+ active: True, dtype: str, type: categorical}
+ - {name: weekday, active: True, dtype: str, type: categorical, preprocess: convert_weekday}
+ - {name: weekend, active: True, dtype: str, type: categorical, preprocess: convert_weekend}
+ label_col: {name: click, dtype: float}
+
+
+tuner_space:
+ model_root: './checkpoints/'
+ embedding_dim: 16
+ embedding_regularizer: [0, 1.e-9]
+ dnn_hidden_units: [[2000, 2000, 2000]]
+ dnn_hidden_activations: relu
+ model_type: ParallelMaskNet
+ parallel_num_blocks: [6, 10]
+ parallel_block_dim: [500, 100, 200]
+ reduction_ratio: [0.8, 0.4, 0.2]
+ emb_layernorm: False
+ net_layernorm: False
+ net_dropout: 0
+ learning_rate: 1.e-3
+ batch_size: 10000
+ seed: 2019
+ monitor: {'AUC': 1, 'logloss': -1}
+ metrics: [['AUC', 'logloss']]
+ early_stop_patience: 1
diff --git a/ranking/ctr/MaskNet/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03/dataset_config.yaml b/ranking/ctr/MaskNet/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03/dataset_config.yaml
new file mode 100644
index 00000000..0de8b62a
--- /dev/null
+++ b/ranking/ctr/MaskNet/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03/dataset_config.yaml
@@ -0,0 +1,19 @@
+avazu_x4_001_a31210da:
+ data_format: csv
+ data_root: ../data/Avazu/
+ feature_cols:
+ - {active: false, dtype: str, name: id, type: categorical}
+ - {active: true, dtype: str, name: hour, preprocess: convert_hour, type: categorical}
+ - active: true
+ dtype: str
+ name: [C1, banner_pos, site_id, site_domain, site_category, app_id, app_domain,
+ app_category, device_id, device_ip, device_model, device_type, device_conn_type,
+ C14, C15, C16, C17, C18, C19, C20, C21]
+ type: categorical
+ - {active: true, dtype: str, name: weekday, preprocess: convert_weekday, type: categorical}
+ - {active: true, dtype: str, name: weekend, preprocess: convert_weekend, type: categorical}
+ label_col: {dtype: float, name: click}
+ min_categr_count: 2
+ test_data: ../data/Avazu/Avazu_x4/test.csv
+ train_data: ../data/Avazu/Avazu_x4/train.csv
+ valid_data: ../data/Avazu/Avazu_x4/valid.csv
diff --git a/ranking/ctr/MaskNet/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03/model_config.yaml b/ranking/ctr/MaskNet/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03/model_config.yaml
new file mode 100644
index 00000000..baf0d18b
--- /dev/null
+++ b/ranking/ctr/MaskNet/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03/model_config.yaml
@@ -0,0 +1,1368 @@
+MaskNet_avazu_x4_001_001_bbac6c11:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 500
+ parallel_num_blocks: 6
+ pickle_feature_encoder: true
+ reduction_ratio: 0.8
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_002_9ef5839b:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 500
+ parallel_num_blocks: 6
+ pickle_feature_encoder: true
+ reduction_ratio: 0.4
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_003_76be1274:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 500
+ parallel_num_blocks: 6
+ pickle_feature_encoder: true
+ reduction_ratio: 0.2
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_004_4c10b76a:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 100
+ parallel_num_blocks: 6
+ pickle_feature_encoder: true
+ reduction_ratio: 0.8
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_005_78175568:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 100
+ parallel_num_blocks: 6
+ pickle_feature_encoder: true
+ reduction_ratio: 0.4
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_006_32009e4b:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 100
+ parallel_num_blocks: 6
+ pickle_feature_encoder: true
+ reduction_ratio: 0.2
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_007_4dcce194:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 200
+ parallel_num_blocks: 6
+ pickle_feature_encoder: true
+ reduction_ratio: 0.8
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_008_776519e7:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 200
+ parallel_num_blocks: 6
+ pickle_feature_encoder: true
+ reduction_ratio: 0.4
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_009_077a69f8:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 200
+ parallel_num_blocks: 6
+ pickle_feature_encoder: true
+ reduction_ratio: 0.2
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_010_c00b0af8:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 500
+ parallel_num_blocks: 10
+ pickle_feature_encoder: true
+ reduction_ratio: 0.8
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_011_a867ff31:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 500
+ parallel_num_blocks: 10
+ pickle_feature_encoder: true
+ reduction_ratio: 0.4
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_012_7d3eddf1:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 500
+ parallel_num_blocks: 10
+ pickle_feature_encoder: true
+ reduction_ratio: 0.2
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_013_c06e0e42:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 100
+ parallel_num_blocks: 10
+ pickle_feature_encoder: true
+ reduction_ratio: 0.8
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_014_ca388c2c:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 100
+ parallel_num_blocks: 10
+ pickle_feature_encoder: true
+ reduction_ratio: 0.4
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_015_efdd867c:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 100
+ parallel_num_blocks: 10
+ pickle_feature_encoder: true
+ reduction_ratio: 0.2
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_016_50685b2e:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 200
+ parallel_num_blocks: 10
+ pickle_feature_encoder: true
+ reduction_ratio: 0.8
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_017_97d6e691:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 200
+ parallel_num_blocks: 10
+ pickle_feature_encoder: true
+ reduction_ratio: 0.4
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_018_1738e59f:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 0
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 200
+ parallel_num_blocks: 10
+ pickle_feature_encoder: true
+ reduction_ratio: 0.2
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_019_541571c0:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 500
+ parallel_num_blocks: 6
+ pickle_feature_encoder: true
+ reduction_ratio: 0.8
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_020_2b7f0a68:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 500
+ parallel_num_blocks: 6
+ pickle_feature_encoder: true
+ reduction_ratio: 0.4
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_021_ba3b920a:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 500
+ parallel_num_blocks: 6
+ pickle_feature_encoder: true
+ reduction_ratio: 0.2
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_022_f6170749:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 100
+ parallel_num_blocks: 6
+ pickle_feature_encoder: true
+ reduction_ratio: 0.8
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_023_b4bd40ab:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 100
+ parallel_num_blocks: 6
+ pickle_feature_encoder: true
+ reduction_ratio: 0.4
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_024_1028ff87:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 100
+ parallel_num_blocks: 6
+ pickle_feature_encoder: true
+ reduction_ratio: 0.2
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_025_5ca67f2d:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 200
+ parallel_num_blocks: 6
+ pickle_feature_encoder: true
+ reduction_ratio: 0.8
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_026_eff99881:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 200
+ parallel_num_blocks: 6
+ pickle_feature_encoder: true
+ reduction_ratio: 0.4
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_027_16be52ea:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 200
+ parallel_num_blocks: 6
+ pickle_feature_encoder: true
+ reduction_ratio: 0.2
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_028_7b840e59:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 500
+ parallel_num_blocks: 10
+ pickle_feature_encoder: true
+ reduction_ratio: 0.8
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_029_20693171:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 500
+ parallel_num_blocks: 10
+ pickle_feature_encoder: true
+ reduction_ratio: 0.4
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_030_63a9ed56:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 500
+ parallel_num_blocks: 10
+ pickle_feature_encoder: true
+ reduction_ratio: 0.2
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_031_46003868:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 100
+ parallel_num_blocks: 10
+ pickle_feature_encoder: true
+ reduction_ratio: 0.8
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_032_404cab49:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 100
+ parallel_num_blocks: 10
+ pickle_feature_encoder: true
+ reduction_ratio: 0.4
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_033_6fff8ecb:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 100
+ parallel_num_blocks: 10
+ pickle_feature_encoder: true
+ reduction_ratio: 0.2
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_034_6960833c:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 200
+ parallel_num_blocks: 10
+ pickle_feature_encoder: true
+ reduction_ratio: 0.8
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_035_5dda1c36:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 200
+ parallel_num_blocks: 10
+ pickle_feature_encoder: true
+ reduction_ratio: 0.4
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_avazu_x4_001_036_66863184:
+ batch_size: 10000
+ dataset_id: avazu_x4_001_a31210da
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [2000, 2000, 2000]
+ early_stop_patience: 1
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-09
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: {AUC: 1, logloss: -1}
+ monitor_mode: max
+ net_dropout: 0
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 200
+ parallel_num_blocks: 10
+ pickle_feature_encoder: true
+ reduction_ratio: 0.2
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
diff --git a/ranking/ctr/MaskNet/MaskNet_avazu_x4_001/README.md b/ranking/ctr/MaskNet/MaskNet_avazu_x4_001/README.md
new file mode 100644
index 00000000..c09b3b31
--- /dev/null
+++ b/ranking/ctr/MaskNet/MaskNet_avazu_x4_001/README.md
@@ -0,0 +1,182 @@
+## MaskNet_avazu_x4_001
+
+A hands-on guide to run the MaskNet model on the Avazu_x4 dataset.
+
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
+
+
+| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
+|:-----------------------------:|:-----------:|:--------:|:--------:|-------|
+### Environments
++ Hardware
+
+ ```python
+ CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
+ GPU: Tesla V100 32G
+ RAM: 755G
+
+ ```
+
++ Software
+
+ ```python
+ CUDA: 10.2
+ python: 3.7.10
+ pytorch: 1.10.2+cu102
+ pandas: 1.1.5
+ numpy: 1.19.5
+ scipy: 1.5.2
+ sklearn: 0.22.1
+ pyyaml: 6.0.1
+ h5py: 2.8.0
+ tqdm: 4.64.0
+ keras_preprocessing: 1.1.2
+ fuxictr: 2.2.0
+
+ ```
+
+### Dataset
+Please refer to [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4) to get the dataset details.
+
+### Code
+
+We use the [MaskNet](https://github.com/reczoo/FuxiCTR/tree/v2.2.0/model_zoo/MaskNet) model code from [FuxiCTR-v2.2.0](https://github.com/reczoo/FuxiCTR/tree/v2.2.0) for this experiment.
+
+Running steps:
+
+1. Download [FuxiCTR-v2.2.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.2.0.zip) and install all the dependencies listed in the [environments](#environments).
+
+ ```bash
+ pip uninstall fuxictr
+ pip install fuxictr==2.2.0
+ ```
+
+2. Create a data directory and put the downloaded data files in `../data/Avazu/Avazu_x4`.
+
+3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [MaskNet_avazu_x4_tuner_config_03](./MaskNet_avazu_x4_tuner_config_03). Please make sure that the data paths in `dataset_config.yaml` are correctly set.
+
+4. Run the following script to start training and evaluation.
+
+ ```bash
+ cd FuxiCTR/model_zoo/MaskNet
+ nohup python run_expid.py --config YOUR_PATH/MaskNet/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_019_541571c0 --gpu 0 > run.log &
+ tail -f run.log
+ ```
+
+### Results
+
+| AUC | logloss |
+|:--------------------:|:--------------------:|
+| 0.794382 | 0.371189 |
+
+
+### Logs
+```python
+2024-02-22 20:06:23,961 P316688 INFO Params: {
+ "batch_size": "10000",
+ "data_format": "csv",
+ "data_root": "../data/Avazu/",
+ "dataset_id": "avazu_x4_001_a31210da",
+ "debug_mode": "False",
+ "dnn_hidden_activations": "relu",
+ "dnn_hidden_units": "[2000, 2000, 2000]",
+ "early_stop_patience": "1",
+ "emb_layernorm": "False",
+ "embedding_dim": "16",
+ "embedding_regularizer": "1e-09",
+ "epochs": "100",
+ "eval_steps": "None",
+ "feature_cols": "[{'active': False, 'dtype': 'str', 'name': 'id', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': 'hour', 'preprocess': 'convert_hour', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': ['C1', 'banner_pos', 'site_id', 'site_domain', 'site_category', 'app_id', 'app_domain', 'app_category', 'device_id', 'device_ip', 'device_model', 'device_type', 'device_conn_type', 'C14', 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21'], 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': 'weekday', 'preprocess': 'convert_weekday', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'name': 'weekend', 'preprocess': 'convert_weekend', 'type': 'categorical'}]",
+ "feature_config": "None",
+ "feature_specs": "None",
+ "gpu": "1",
+ "group_id": "None",
+ "label_col": "{'dtype': 'float', 'name': 'click'}",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "metrics": "['AUC', 'logloss']",
+ "min_categr_count": "2",
+ "model": "MaskNet",
+ "model_id": "MaskNet_avazu_x4_001_019_541571c0",
+ "model_root": "./checkpoints/",
+ "model_type": "ParallelMaskNet",
+ "monitor": "{'AUC': 1, 'logloss': -1}",
+ "monitor_mode": "max",
+ "net_dropout": "0",
+ "net_layernorm": "False",
+ "net_regularizer": "0",
+ "num_workers": "3",
+ "optimizer": "adam",
+ "parallel_block_dim": "500",
+ "parallel_num_blocks": "6",
+ "pickle_feature_encoder": "True",
+ "reduction_ratio": "0.8",
+ "save_best_only": "True",
+ "seed": "2019",
+ "shuffle": "True",
+ "task": "binary_classification",
+ "test_data": "../data/Avazu/Avazu_x4/test.csv",
+ "train_data": "../data/Avazu/Avazu_x4/train.csv",
+ "use_features": "None",
+ "valid_data": "../data/Avazu/Avazu_x4/valid.csv",
+ "verbose": "1"
+}
+2024-02-22 20:06:23,962 P316688 INFO Set up feature processor...
+2024-02-22 20:06:23,963 P316688 WARNING Skip rebuilding ../data/Avazu/avazu_x4_001_a31210da/feature_map.json. Please delete it manually if rebuilding is required.
+2024-02-22 20:06:23,963 P316688 INFO Load feature_map from json: ../data/Avazu/avazu_x4_001_a31210da/feature_map.json
+2024-02-22 20:06:23,963 P316688 INFO Set column index...
+2024-02-22 20:06:23,963 P316688 INFO Feature specs: {
+ "C1": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 9}",
+ "C14": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 2556}",
+ "C15": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 10}",
+ "C16": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 11}",
+ "C17": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 434}",
+ "C18": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 6}",
+ "C19": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 70}",
+ "C20": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 173}",
+ "C21": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 62}",
+ "app_category": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 33}",
+ "app_domain": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 400}",
+ "app_id": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 6545}",
+ "banner_pos": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 9}",
+ "device_conn_type": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 6}",
+ "device_id": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 820509}",
+ "device_ip": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 2903322}",
+ "device_model": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 7259}",
+ "device_type": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 7}",
+ "hour": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 26}",
+ "site_category": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 27}",
+ "site_domain": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 5461}",
+ "site_id": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 4051}",
+ "weekday": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 9}",
+ "weekend": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 4}"
+}
+2024-02-22 20:06:28,906 P316688 INFO Total number of parameters: 76594787.
+2024-02-22 20:06:28,907 P316688 INFO Loading datasets...
+2024-02-22 20:06:49,843 P316688 INFO Train samples: total/32343172, blocks/1
+2024-02-22 20:06:52,314 P316688 INFO Validation samples: total/4042897, blocks/1
+2024-02-22 20:06:52,314 P316688 INFO Loading train and validation data done.
+2024-02-22 20:06:52,314 P316688 INFO Start training: 3235 batches/epoch
+2024-02-22 20:06:52,314 P316688 INFO ************ Epoch=1 start ************
+2024-02-22 20:13:41,048 P316688 INFO Train loss: 0.379669
+2024-02-22 20:13:41,048 P316688 INFO Evaluation @epoch 1 - batch 3235:
+2024-02-22 20:14:00,960 P316688 INFO [Metrics] AUC: 0.794253 - logloss: 0.371254
+2024-02-22 20:14:00,963 P316688 INFO Save best model: monitor(max)=0.423000
+2024-02-22 20:14:01,495 P316688 INFO ************ Epoch=1 end ************
+2024-02-22 20:20:51,541 P316688 INFO Train loss: 0.329410
+2024-02-22 20:20:51,542 P316688 INFO Evaluation @epoch 2 - batch 3235:
+2024-02-22 20:21:11,342 P316688 INFO [Metrics] AUC: 0.788830 - logloss: 0.379190
+2024-02-22 20:21:11,346 P316688 INFO Monitor(max)=0.409639 STOP!
+2024-02-22 20:21:11,346 P316688 INFO Reduce learning rate on plateau: 0.000100
+2024-02-22 20:21:11,346 P316688 INFO ********* Epoch==2 early stop *********
+2024-02-22 20:21:11,397 P316688 INFO Training finished.
+2024-02-22 20:21:11,397 P316688 INFO Load best model: /cache/FuxiCTR/benchmark/checkpoints/avazu_x4_001_a31210da/MaskNet_avazu_x4_001_019_541571c0.model
+2024-02-22 20:21:11,641 P316688 INFO ****** Validation evaluation ******
+2024-02-22 20:21:31,510 P316688 INFO [Metrics] AUC: 0.794253 - logloss: 0.371254
+2024-02-22 20:21:31,627 P316688 INFO ******** Test evaluation ********
+2024-02-22 20:21:31,627 P316688 INFO Loading datasets...
+2024-02-22 20:21:34,114 P316688 INFO Test samples: total/4042898, blocks/1
+2024-02-22 20:21:34,114 P316688 INFO Loading test data done.
+2024-02-22 20:21:53,769 P316688 INFO [Metrics] AUC: 0.794382 - logloss: 0.371189
+
+```
diff --git a/ranking/ctr/MaskNet/MaskNet_avazu_x4_001/environments.txt b/ranking/ctr/MaskNet/MaskNet_avazu_x4_001/environments.txt
new file mode 100644
index 00000000..5415575c
--- /dev/null
+++ b/ranking/ctr/MaskNet/MaskNet_avazu_x4_001/environments.txt
@@ -0,0 +1,18 @@
+[Hardware]
+CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
+GPU: Tesla V100 32G
+RAM: 755G
+
+[Software]
+CUDA: 10.2
+python: 3.7.10
+pytorch: 1.10.2+cu102
+pandas: 1.1.5
+numpy: 1.19.5
+scipy: 1.5.2
+sklearn: 0.22.1
+pyyaml: 6.0.1
+h5py: 2.8.0
+tqdm: 4.64.0
+keras_preprocessing: 1.1.2
+fuxictr: 2.2.0
diff --git a/ranking/ctr/MaskNet/MaskNet_avazu_x4_001/results.csv b/ranking/ctr/MaskNet/MaskNet_avazu_x4_001/results.csv
new file mode 100644
index 00000000..050d2097
--- /dev/null
+++ b/ranking/ctr/MaskNet/MaskNet_avazu_x4_001/results.csv
@@ -0,0 +1 @@
+ 20240222-202153,[command] python run_expid.py --config Avazu_x4/MaskNet_avazu_x4_001/MaskNet_avazu_x4_tuner_config_03 --expid MaskNet_avazu_x4_001_019_541571c0 --gpu 1,[exp_id] MaskNet_avazu_x4_001_019_541571c0,[dataset_id] avazu_x4_001_a31210da,[train] N.A.,[val] AUC: 0.794253 - logloss: 0.371254,[test] AUC: 0.794382 - logloss: 0.371189
diff --git a/ranking/ctr/MaskNet/MaskNet_criteo_x1/README.md b/ranking/ctr/MaskNet/MaskNet_criteo_x1/README.md
index d7bf3a02..9d01d875 100644
--- a/ranking/ctr/MaskNet/MaskNet_criteo_x1/README.md
+++ b/ranking/ctr/MaskNet/MaskNet_criteo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the MaskNet model on the Criteo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -39,11 +39,11 @@ Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pr
### Code
-We use [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [MaskNet](https://github.com/xue-pai/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/MaskNet.py).
+We use [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [MaskNet](https://github.com/reczoo/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/MaskNet.py).
Running steps:
-1. Download [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/MaskNet/MaskNet_criteo_x4_001/MaskNet_criteo_x4_001_018_ccc857cd.log b/ranking/ctr/MaskNet/MaskNet_criteo_x4_001/MaskNet_criteo_x4_001_018_ccc857cd.log
new file mode 100644
index 00000000..c0324c8a
--- /dev/null
+++ b/ranking/ctr/MaskNet/MaskNet_criteo_x4_001/MaskNet_criteo_x4_001_018_ccc857cd.log
@@ -0,0 +1,168 @@
+2024-02-23 11:34:31,352 P3402379 INFO Params: {
+ "batch_size": "10000",
+ "data_format": "csv",
+ "data_root": "../data/Criteo/",
+ "dataset_id": "criteo_x4_001_a5e05ce7",
+ "debug_mode": "False",
+ "dnn_hidden_activations": "relu",
+ "dnn_hidden_units": "[1000, 1000, 1000, 1000, 1000]",
+ "early_stop_patience": "2",
+ "emb_layernorm": "False",
+ "embedding_dim": "16",
+ "embedding_regularizer": "1e-05",
+ "epochs": "100",
+ "eval_steps": "None",
+ "feature_cols": "[{'active': True, 'dtype': 'float', 'fill_na': 0, 'name': ['I1', 'I2', 'I3', 'I4', 'I5', 'I6', 'I7', 'I8', 'I9', 'I10', 'I11', 'I12', 'I13'], 'preprocess': 'convert_to_bucket', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'fill_na': '', 'name': ['C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10', 'C11', 'C12', 'C13', 'C14', 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21', 'C22', 'C23', 'C24', 'C25', 'C26'], 'type': 'categorical'}]",
+ "feature_config": "None",
+ "feature_specs": "None",
+ "gpu": "1",
+ "group_id": "None",
+ "label_col": "{'dtype': 'float', 'name': 'Label'}",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "metrics": "['AUC', 'logloss']",
+ "min_categr_count": "10",
+ "model": "MaskNet",
+ "model_id": "MaskNet_criteo_x4_001_018_ccc857cd",
+ "model_root": "./checkpoints/",
+ "model_type": "ParallelMaskNet",
+ "monitor": "AUC",
+ "monitor_mode": "max",
+ "net_dropout": "0.3",
+ "net_layernorm": "False",
+ "net_regularizer": "0",
+ "num_workers": "3",
+ "optimizer": "adam",
+ "parallel_block_dim": "500",
+ "parallel_num_blocks": "2",
+ "pickle_feature_encoder": "True",
+ "reduction_ratio": "0.1",
+ "save_best_only": "True",
+ "seed": "2019",
+ "shuffle": "True",
+ "task": "binary_classification",
+ "test_data": "../data/Criteo/Criteo_x4/test.csv",
+ "train_data": "../data/Criteo/Criteo_x4/train.csv",
+ "use_features": "None",
+ "valid_data": "../data/Criteo/Criteo_x4/valid.csv",
+ "verbose": "1"
+}
+2024-02-23 11:34:31,353 P3402379 INFO Set up feature processor...
+2024-02-23 11:34:31,353 P3402379 WARNING Skip rebuilding ../data/Criteo/criteo_x4_001_a5e05ce7/feature_map.json. Please delete it manually if rebuilding is required.
+2024-02-23 11:34:31,353 P3402379 INFO Load feature_map from json: ../data/Criteo/criteo_x4_001_a5e05ce7/feature_map.json
+2024-02-23 11:34:31,354 P3402379 INFO Set column index...
+2024-02-23 11:34:31,354 P3402379 INFO Feature specs: {
+ "C1": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 1446}",
+ "C10": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 39530}",
+ "C11": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 5131}",
+ "C12": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 156656}",
+ "C13": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 3176}",
+ "C14": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 28}",
+ "C15": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 11043}",
+ "C16": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 148913}",
+ "C17": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 12}",
+ "C18": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 4560}",
+ "C19": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 2003}",
+ "C2": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 554}",
+ "C20": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 5}",
+ "C21": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 154564}",
+ "C22": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 18}",
+ "C23": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 17}",
+ "C24": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 53031}",
+ "C25": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 82}",
+ "C26": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 40955}",
+ "C3": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 157339}",
+ "C4": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 117822}",
+ "C5": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 306}",
+ "C6": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 18}",
+ "C7": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 11882}",
+ "C8": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 630}",
+ "C9": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 5}",
+ "I1": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 44}",
+ "I10": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 6}",
+ "I11": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 27}",
+ "I12": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 37}",
+ "I13": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 72}",
+ "I2": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 99}",
+ "I3": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 122}",
+ "I4": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 41}",
+ "I5": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 220}",
+ "I6": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 112}",
+ "I7": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 80}",
+ "I8": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 69}",
+ "I9": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 92}"
+}
+2024-02-23 11:34:35,198 P3402379 INFO Total number of parameters: 20358077.
+2024-02-23 11:34:35,198 P3402379 INFO Loading datasets...
+2024-02-23 11:35:11,583 P3402379 INFO Train samples: total/36672493, blocks/1
+2024-02-23 11:35:16,175 P3402379 INFO Validation samples: total/4584062, blocks/1
+2024-02-23 11:35:16,175 P3402379 INFO Loading train and validation data done.
+2024-02-23 11:35:16,175 P3402379 INFO Start training: 3668 batches/epoch
+2024-02-23 11:35:16,175 P3402379 INFO ************ Epoch=1 start ************
+2024-02-23 11:40:13,140 P3402379 INFO Train loss: 0.459513
+2024-02-23 11:40:13,141 P3402379 INFO Evaluation @epoch 1 - batch 3668:
+2024-02-23 11:40:27,684 P3402379 INFO [Metrics] AUC: 0.806480
+2024-02-23 11:40:27,686 P3402379 INFO Save best model: monitor(max)=0.806480
+2024-02-23 11:40:27,864 P3402379 INFO ************ Epoch=1 end ************
+2024-02-23 11:45:24,583 P3402379 INFO Train loss: 0.452990
+2024-02-23 11:45:24,583 P3402379 INFO Evaluation @epoch 2 - batch 3668:
+2024-02-23 11:45:39,011 P3402379 INFO [Metrics] AUC: 0.808234
+2024-02-23 11:45:39,012 P3402379 INFO Save best model: monitor(max)=0.808234
+2024-02-23 11:45:39,189 P3402379 INFO ************ Epoch=2 end ************
+2024-02-23 11:50:35,804 P3402379 INFO Train loss: 0.451728
+2024-02-23 11:50:35,804 P3402379 INFO Evaluation @epoch 3 - batch 3668:
+2024-02-23 11:50:50,241 P3402379 INFO [Metrics] AUC: 0.809256
+2024-02-23 11:50:50,242 P3402379 INFO Save best model: monitor(max)=0.809256
+2024-02-23 11:50:50,429 P3402379 INFO ************ Epoch=3 end ************
+2024-02-23 11:55:47,287 P3402379 INFO Train loss: 0.451147
+2024-02-23 11:55:47,287 P3402379 INFO Evaluation @epoch 4 - batch 3668:
+2024-02-23 11:56:01,675 P3402379 INFO [Metrics] AUC: 0.809425
+2024-02-23 11:56:01,676 P3402379 INFO Save best model: monitor(max)=0.809425
+2024-02-23 11:56:01,848 P3402379 INFO ************ Epoch=4 end ************
+2024-02-23 12:00:59,781 P3402379 INFO Train loss: 0.450759
+2024-02-23 12:00:59,782 P3402379 INFO Evaluation @epoch 5 - batch 3668:
+2024-02-23 12:01:14,401 P3402379 INFO [Metrics] AUC: 0.809680
+2024-02-23 12:01:14,405 P3402379 INFO Save best model: monitor(max)=0.809680
+2024-02-23 12:01:14,596 P3402379 INFO ************ Epoch=5 end ************
+2024-02-23 12:06:12,225 P3402379 INFO Train loss: 0.450590
+2024-02-23 12:06:12,226 P3402379 INFO Evaluation @epoch 6 - batch 3668:
+2024-02-23 12:06:27,233 P3402379 INFO [Metrics] AUC: 0.809876
+2024-02-23 12:06:27,234 P3402379 INFO Save best model: monitor(max)=0.809876
+2024-02-23 12:06:27,415 P3402379 INFO ************ Epoch=6 end ************
+2024-02-23 12:11:25,413 P3402379 INFO Train loss: 0.450475
+2024-02-23 12:11:25,413 P3402379 INFO Evaluation @epoch 7 - batch 3668:
+2024-02-23 12:11:40,046 P3402379 INFO [Metrics] AUC: 0.809802
+2024-02-23 12:11:40,048 P3402379 INFO Monitor(max)=0.809802 STOP!
+2024-02-23 12:11:40,048 P3402379 INFO Reduce learning rate on plateau: 0.000100
+2024-02-23 12:11:40,098 P3402379 INFO ************ Epoch=7 end ************
+2024-02-23 12:16:36,465 P3402379 INFO Train loss: 0.440398
+2024-02-23 12:16:36,466 P3402379 INFO Evaluation @epoch 8 - batch 3668:
+2024-02-23 12:16:51,101 P3402379 INFO [Metrics] AUC: 0.812577
+2024-02-23 12:16:51,103 P3402379 INFO Save best model: monitor(max)=0.812577
+2024-02-23 12:16:51,276 P3402379 INFO ************ Epoch=8 end ************
+2024-02-23 12:21:47,033 P3402379 INFO Train loss: 0.436010
+2024-02-23 12:21:47,034 P3402379 INFO Evaluation @epoch 9 - batch 3668:
+2024-02-23 12:22:02,056 P3402379 INFO [Metrics] AUC: 0.812951
+2024-02-23 12:22:02,060 P3402379 INFO Save best model: monitor(max)=0.812951
+2024-02-23 12:22:02,238 P3402379 INFO ************ Epoch=9 end ************
+2024-02-23 12:26:56,632 P3402379 INFO Train loss: 0.433695
+2024-02-23 12:26:56,632 P3402379 INFO Evaluation @epoch 10 - batch 3668:
+2024-02-23 12:27:11,256 P3402379 INFO [Metrics] AUC: 0.812880
+2024-02-23 12:27:11,258 P3402379 INFO Monitor(max)=0.812880 STOP!
+2024-02-23 12:27:11,258 P3402379 INFO Reduce learning rate on plateau: 0.000010
+2024-02-23 12:27:11,308 P3402379 INFO ************ Epoch=10 end ************
+2024-02-23 12:32:05,896 P3402379 INFO Train loss: 0.430003
+2024-02-23 12:32:05,896 P3402379 INFO Evaluation @epoch 11 - batch 3668:
+2024-02-23 12:32:20,496 P3402379 INFO [Metrics] AUC: 0.812549
+2024-02-23 12:32:20,497 P3402379 INFO Monitor(max)=0.812549 STOP!
+2024-02-23 12:32:20,498 P3402379 INFO Reduce learning rate on plateau: 0.000001
+2024-02-23 12:32:20,498 P3402379 INFO ********* Epoch==11 early stop *********
+2024-02-23 12:32:20,560 P3402379 INFO Training finished.
+2024-02-23 12:32:20,560 P3402379 INFO Load best model: /cache/FuxiCTR/benchmark/checkpoints/criteo_x4_001_a5e05ce7/MaskNet_criteo_x4_001_018_ccc857cd.model
+2024-02-23 12:32:20,644 P3402379 INFO ****** Validation evaluation ******
+2024-02-23 12:32:36,461 P3402379 INFO [Metrics] AUC: 0.812951 - logloss: 0.439143
+2024-02-23 12:32:36,559 P3402379 INFO ******** Test evaluation ********
+2024-02-23 12:32:36,559 P3402379 INFO Loading datasets...
+2024-02-23 12:32:41,178 P3402379 INFO Test samples: total/4584062, blocks/1
+2024-02-23 12:32:41,178 P3402379 INFO Loading test data done.
+2024-02-23 12:32:57,223 P3402379 INFO [Metrics] AUC: 0.813420 - logloss: 0.438748
diff --git a/ranking/ctr/MaskNet/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06.csv b/ranking/ctr/MaskNet/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06.csv
new file mode 100644
index 00000000..31c876ff
--- /dev/null
+++ b/ranking/ctr/MaskNet/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06.csv
@@ -0,0 +1,36 @@
+ 20240223-123257,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_018_ccc857cd --gpu 1,[exp_id] MaskNet_criteo_x4_001_018_ccc857cd,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812951 - logloss: 0.439143,[test] AUC: 0.813420 - logloss: 0.438748
+ 20240223-104054,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_004_e17e1f4f --gpu 3,[exp_id] MaskNet_criteo_x4_001_004_e17e1f4f,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812876 - logloss: 0.439222,[test] AUC: 0.813394 - logloss: 0.438766
+ 20240223-145738,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_035_44646680 --gpu 6,[exp_id] MaskNet_criteo_x4_001_035_44646680,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812836 - logloss: 0.443358,[test] AUC: 0.813389 - logloss: 0.442911
+ 20240223-112418,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_010_21537cc6 --gpu 3,[exp_id] MaskNet_criteo_x4_001_010_21537cc6,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812785 - logloss: 0.439318,[test] AUC: 0.813324 - logloss: 0.438903
+ 20240223-111941,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_005_34d2c00a --gpu 4,[exp_id] MaskNet_criteo_x4_001_005_34d2c00a,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812842 - logloss: 0.440213,[test] AUC: 0.813306 - logloss: 0.439862
+ 20240223-125938,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_017_dbb18303 --gpu 3,[exp_id] MaskNet_criteo_x4_001_017_dbb18303,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812832 - logloss: 0.443291,[test] AUC: 0.813296 - logloss: 0.442892
+ 20240223-133031,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_023_04420727 --gpu 5,[exp_id] MaskNet_criteo_x4_001_023_04420727,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812738 - logloss: 0.440779,[test] AUC: 0.813263 - logloss: 0.440373
+ 20240223-110849,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_006_16ccb897 --gpu 5,[exp_id] MaskNet_criteo_x4_001_006_16ccb897,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812842 - logloss: 0.439240,[test] AUC: 0.813237 - logloss: 0.438894
+ 20240223-143421,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_034_b6ebf8a8 --gpu 7,[exp_id] MaskNet_criteo_x4_001_034_b6ebf8a8,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812577 - logloss: 0.439589,[test] AUC: 0.813220 - logloss: 0.439036
+ 20240223-131252,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_024_a93dc017 --gpu 4,[exp_id] MaskNet_criteo_x4_001_024_a93dc017,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812760 - logloss: 0.439673,[test] AUC: 0.813146 - logloss: 0.439355
+ 20240223-105141,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_003_d50b7409 --gpu 2,[exp_id] MaskNet_criteo_x4_001_003_d50b7409,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812562 - logloss: 0.440444,[test] AUC: 0.813125 - logloss: 0.440011
+ 20240223-120445,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_015_6ccd12b8 --gpu 5,[exp_id] MaskNet_criteo_x4_001_015_6ccd12b8,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812479 - logloss: 0.441485,[test] AUC: 0.813088 - logloss: 0.440972
+ 20240223-114833,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_012_f5fb7b1c --gpu 2,[exp_id] MaskNet_criteo_x4_001_012_f5fb7b1c,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812427 - logloss: 0.439974,[test] AUC: 0.813034 - logloss: 0.439469
+ 20240223-104149,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_002_84023d8b --gpu 1,[exp_id] MaskNet_criteo_x4_001_002_84023d8b,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812551 - logloss: 0.439495,[test] AUC: 0.813007 - logloss: 0.439130
+ 20240223-104049,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_008_32f23675 --gpu 7,[exp_id] MaskNet_criteo_x4_001_008_32f23675,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812516 - logloss: 0.439492,[test] AUC: 0.813005 - logloss: 0.439061
+ 20240223-114341,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_009_816098e9 --gpu 7,[exp_id] MaskNet_criteo_x4_001_009_816098e9,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812448 - logloss: 0.447382,[test] AUC: 0.812971 - logloss: 0.446927
+ 20240223-151129,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_036_32f23675 --gpu 0,[exp_id] MaskNet_criteo_x4_001_036_32f23675,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812463 - logloss: 0.439551,[test] AUC: 0.812930 - logloss: 0.439154
+ 20240223-124038,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_021_63fefee7 --gpu 2,[exp_id] MaskNet_criteo_x4_001_021_63fefee7,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812440 - logloss: 0.442305,[test] AUC: 0.812902 - logloss: 0.441942
+ 20240223-123551,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_022_eb4c0071 --gpu 0,[exp_id] MaskNet_criteo_x4_001_022_eb4c0071,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812439 - logloss: 0.439541,[test] AUC: 0.812885 - logloss: 0.439164
+ 20240223-120813,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_016_37670541 --gpu 4,[exp_id] MaskNet_criteo_x4_001_016_37670541,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812328 - logloss: 0.439521,[test] AUC: 0.812853 - logloss: 0.439053
+ 20240223-142502,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_029_1f0807d1 --gpu 2,[exp_id] MaskNet_criteo_x4_001_029_1f0807d1,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812381 - logloss: 0.443683,[test] AUC: 0.812843 - logloss: 0.443332
+ 20240223-113427,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_011_b17be79c --gpu 1,[exp_id] MaskNet_criteo_x4_001_011_b17be79c,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812265 - logloss: 0.442219,[test] AUC: 0.812840 - logloss: 0.441757
+ 20240223-123026,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_020_7bb6763e --gpu 6,[exp_id] MaskNet_criteo_x4_001_020_7bb6763e,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812229 - logloss: 0.439671,[test] AUC: 0.812746 - logloss: 0.439243
+ 20240223-105215,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_001_1f0807d1 --gpu 0,[exp_id] MaskNet_criteo_x4_001_001_1f0807d1,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812232 - logloss: 0.442120,[test] AUC: 0.812688 - logloss: 0.441730
+ 20240223-141536,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_030_84023d8b --gpu 3,[exp_id] MaskNet_criteo_x4_001_030_84023d8b,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812103 - logloss: 0.439938,[test] AUC: 0.812558 - logloss: 0.439574
+ 20240223-115000,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_013_c2aaad80 --gpu 0,[exp_id] MaskNet_criteo_x4_001_013_c2aaad80,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.811868 - logloss: 0.441735,[test] AUC: 0.812330 - logloss: 0.441371
+ 20240223-133720,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_026_d945834f --gpu 1,[exp_id] MaskNet_criteo_x4_001_026_d945834f,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.811533 - logloss: 0.440320,[test] AUC: 0.812083 - logloss: 0.439880
+ 20240223-105523,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_007_44646680 --gpu 6,[exp_id] MaskNet_criteo_x4_001_007_44646680,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.811705 - logloss: 0.442665,[test] AUC: 0.812043 - logloss: 0.442381
+ 20240223-114652,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_014_66998b72 --gpu 6,[exp_id] MaskNet_criteo_x4_001_014_66998b72,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.811535 - logloss: 0.440365,[test] AUC: 0.811941 - logloss: 0.440037
+ 20240223-123840,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_019_f8e0847d --gpu 7,[exp_id] MaskNet_criteo_x4_001_019_f8e0847d,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.811285 - logloss: 0.443009,[test] AUC: 0.811865 - logloss: 0.442542
+ 20240223-134446,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_028_f9de713a --gpu 7,[exp_id] MaskNet_criteo_x4_001_028_f9de713a,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.811352 - logloss: 0.440629,[test] AUC: 0.811771 - logloss: 0.440289
+ 20240223-145132,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_033_b5751be4 --gpu 1,[exp_id] MaskNet_criteo_x4_001_033_b5751be4,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.810843 - logloss: 0.443047,[test] AUC: 0.811489 - logloss: 0.442526
+ 20240223-140207,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_027_57776d87 --gpu 0,[exp_id] MaskNet_criteo_x4_001_027_57776d87,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.810622 - logloss: 0.444136,[test] AUC: 0.811198 - logloss: 0.443698
+ 20240223-141847,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_032_70768930 --gpu 5,[exp_id] MaskNet_criteo_x4_001_032_70768930,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.810524 - logloss: 0.442483,[test] AUC: 0.810952 - logloss: 0.442121
+ 20240223-135623,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_025_de8458e2 --gpu 6,[exp_id] MaskNet_criteo_x4_001_025_de8458e2,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.809831 - logloss: 0.445614,[test] AUC: 0.810454 - logloss: 0.445102
+ 20240223-142824,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_031_fc99183d --gpu 4,[exp_id] MaskNet_criteo_x4_001_031_fc99183d,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.809855 - logloss: 0.444742,[test] AUC: 0.810315 - logloss: 0.444394
diff --git a/ranking/ctr/MaskNet/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06.yaml b/ranking/ctr/MaskNet/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06.yaml
new file mode 100644
index 00000000..dcc63ea0
--- /dev/null
+++ b/ranking/ctr/MaskNet/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06.yaml
@@ -0,0 +1,46 @@
+base_config: ../model_zoo/MaskNet/config/
+base_expid: MaskNet_default
+dataset_id: criteo_x4_001
+
+dataset_config:
+ criteo_x4_001:
+ data_format: csv
+ data_root: ../data/Criteo/
+ feature_cols:
+ - active: true
+ dtype: float
+ fill_na: 0
+ name: [I1, I2, I3, I4, I5, I6, I7, I8, I9, I10, I11, I12, I13]
+ preprocess: convert_to_bucket
+ type: categorical
+ - active: true
+ dtype: str
+ fill_na: ''
+ name: [C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16,
+ C17, C18, C19, C20, C21, C22, C23, C24, C25, C26]
+ type: categorical
+ label_col: {dtype: float, name: Label}
+ min_categr_count: 10
+ test_data: ../data/Criteo/Criteo_x4/test.csv
+ train_data: ../data/Criteo/Criteo_x4/train.csv
+ valid_data: ../data/Criteo/Criteo_x4/valid.csv
+
+tuner_space:
+ model_root: './checkpoints/'
+ embedding_dim: 16
+ embedding_regularizer: 1.e-5
+ dnn_hidden_units: [[1000, 1000, 1000, 1000, 1000]]
+ dnn_hidden_activations: relu
+ model_type: ParallelMaskNet
+ parallel_num_blocks: [1, 2, 8]
+ parallel_block_dim: [500, 200]
+ reduction_ratio: [0.8, 0.4, 0.1]
+ emb_layernorm: False
+ net_layernorm: [True, False]
+ net_dropout: 0.3
+ learning_rate: 1.e-3
+ batch_size: 10000
+ seed: 2019
+ monitor: 'AUC'
+ monitor_mode: 'max'
+ metrics: [['AUC', 'logloss']]
diff --git a/ranking/ctr/MaskNet/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06/dataset_config.yaml b/ranking/ctr/MaskNet/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06/dataset_config.yaml
new file mode 100644
index 00000000..73e334c1
--- /dev/null
+++ b/ranking/ctr/MaskNet/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06/dataset_config.yaml
@@ -0,0 +1,21 @@
+criteo_x4_001_a5e05ce7:
+ data_format: csv
+ data_root: ../data/Criteo/
+ feature_cols:
+ - active: true
+ dtype: float
+ fill_na: 0
+ name: [I1, I2, I3, I4, I5, I6, I7, I8, I9, I10, I11, I12, I13]
+ preprocess: convert_to_bucket
+ type: categorical
+ - active: true
+ dtype: str
+ fill_na: ''
+ name: [C1, C2, C3, C4, C5, C6, C7, C8, C9, C10, C11, C12, C13, C14, C15, C16,
+ C17, C18, C19, C20, C21, C22, C23, C24, C25, C26]
+ type: categorical
+ label_col: {dtype: float, name: Label}
+ min_categr_count: 10
+ test_data: ../data/Criteo/Criteo_x4/test.csv
+ train_data: ../data/Criteo/Criteo_x4/train.csv
+ valid_data: ../data/Criteo/Criteo_x4/valid.csv
diff --git a/ranking/ctr/MaskNet/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06/model_config.yaml b/ranking/ctr/MaskNet/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06/model_config.yaml
new file mode 100644
index 00000000..55350b19
--- /dev/null
+++ b/ranking/ctr/MaskNet/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06/model_config.yaml
@@ -0,0 +1,1368 @@
+MaskNet_criteo_x4_001_001_1f0807d1:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: true
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 500
+ parallel_num_blocks: 1
+ pickle_feature_encoder: true
+ reduction_ratio: 0.8
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_002_84023d8b:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 500
+ parallel_num_blocks: 1
+ pickle_feature_encoder: true
+ reduction_ratio: 0.8
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_003_d50b7409:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: true
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 500
+ parallel_num_blocks: 1
+ pickle_feature_encoder: true
+ reduction_ratio: 0.4
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_004_e17e1f4f:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 500
+ parallel_num_blocks: 1
+ pickle_feature_encoder: true
+ reduction_ratio: 0.4
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_005_34d2c00a:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: true
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 500
+ parallel_num_blocks: 1
+ pickle_feature_encoder: true
+ reduction_ratio: 0.1
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_006_16ccb897:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 500
+ parallel_num_blocks: 1
+ pickle_feature_encoder: true
+ reduction_ratio: 0.1
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_007_44646680:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: true
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 200
+ parallel_num_blocks: 1
+ pickle_feature_encoder: true
+ reduction_ratio: 0.8
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_008_32f23675:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 200
+ parallel_num_blocks: 1
+ pickle_feature_encoder: true
+ reduction_ratio: 0.8
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_009_816098e9:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: true
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 200
+ parallel_num_blocks: 1
+ pickle_feature_encoder: true
+ reduction_ratio: 0.4
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_010_21537cc6:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 200
+ parallel_num_blocks: 1
+ pickle_feature_encoder: true
+ reduction_ratio: 0.4
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_011_b17be79c:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: true
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 200
+ parallel_num_blocks: 1
+ pickle_feature_encoder: true
+ reduction_ratio: 0.1
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_012_f5fb7b1c:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 200
+ parallel_num_blocks: 1
+ pickle_feature_encoder: true
+ reduction_ratio: 0.1
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_013_c2aaad80:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: true
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 500
+ parallel_num_blocks: 2
+ pickle_feature_encoder: true
+ reduction_ratio: 0.8
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_014_66998b72:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 500
+ parallel_num_blocks: 2
+ pickle_feature_encoder: true
+ reduction_ratio: 0.8
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_015_6ccd12b8:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: true
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 500
+ parallel_num_blocks: 2
+ pickle_feature_encoder: true
+ reduction_ratio: 0.4
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_016_37670541:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 500
+ parallel_num_blocks: 2
+ pickle_feature_encoder: true
+ reduction_ratio: 0.4
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_017_dbb18303:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: true
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 500
+ parallel_num_blocks: 2
+ pickle_feature_encoder: true
+ reduction_ratio: 0.1
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_018_ccc857cd:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 500
+ parallel_num_blocks: 2
+ pickle_feature_encoder: true
+ reduction_ratio: 0.1
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_019_f8e0847d:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: true
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 200
+ parallel_num_blocks: 2
+ pickle_feature_encoder: true
+ reduction_ratio: 0.8
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_020_7bb6763e:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 200
+ parallel_num_blocks: 2
+ pickle_feature_encoder: true
+ reduction_ratio: 0.8
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_021_63fefee7:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: true
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 200
+ parallel_num_blocks: 2
+ pickle_feature_encoder: true
+ reduction_ratio: 0.4
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_022_eb4c0071:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 200
+ parallel_num_blocks: 2
+ pickle_feature_encoder: true
+ reduction_ratio: 0.4
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_023_04420727:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: true
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 200
+ parallel_num_blocks: 2
+ pickle_feature_encoder: true
+ reduction_ratio: 0.1
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_024_a93dc017:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 200
+ parallel_num_blocks: 2
+ pickle_feature_encoder: true
+ reduction_ratio: 0.1
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_025_de8458e2:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: true
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 500
+ parallel_num_blocks: 8
+ pickle_feature_encoder: true
+ reduction_ratio: 0.8
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_026_d945834f:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 500
+ parallel_num_blocks: 8
+ pickle_feature_encoder: true
+ reduction_ratio: 0.8
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_027_57776d87:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: true
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 500
+ parallel_num_blocks: 8
+ pickle_feature_encoder: true
+ reduction_ratio: 0.4
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_028_f9de713a:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 500
+ parallel_num_blocks: 8
+ pickle_feature_encoder: true
+ reduction_ratio: 0.4
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_029_1f0807d1:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: true
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 500
+ parallel_num_blocks: 8
+ pickle_feature_encoder: true
+ reduction_ratio: 0.1
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_030_84023d8b:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 500
+ parallel_num_blocks: 8
+ pickle_feature_encoder: true
+ reduction_ratio: 0.1
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_031_fc99183d:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: true
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 200
+ parallel_num_blocks: 8
+ pickle_feature_encoder: true
+ reduction_ratio: 0.8
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_032_70768930:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 200
+ parallel_num_blocks: 8
+ pickle_feature_encoder: true
+ reduction_ratio: 0.8
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_033_b5751be4:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: true
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 200
+ parallel_num_blocks: 8
+ pickle_feature_encoder: true
+ reduction_ratio: 0.4
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_034_b6ebf8a8:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 200
+ parallel_num_blocks: 8
+ pickle_feature_encoder: true
+ reduction_ratio: 0.4
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_035_44646680:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: true
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 200
+ parallel_num_blocks: 8
+ pickle_feature_encoder: true
+ reduction_ratio: 0.1
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
+MaskNet_criteo_x4_001_036_32f23675:
+ batch_size: 10000
+ dataset_id: criteo_x4_001_a5e05ce7
+ debug_mode: false
+ dnn_hidden_activations: relu
+ dnn_hidden_units: [1000, 1000, 1000, 1000, 1000]
+ early_stop_patience: 2
+ emb_layernorm: false
+ embedding_dim: 16
+ embedding_regularizer: 1.0e-05
+ epochs: 100
+ eval_steps: null
+ feature_config: null
+ feature_specs: null
+ group_id: null
+ learning_rate: 0.001
+ loss: binary_crossentropy
+ metrics: [AUC, logloss]
+ model: MaskNet
+ model_root: ./checkpoints/
+ model_type: ParallelMaskNet
+ monitor: AUC
+ monitor_mode: max
+ net_dropout: 0.3
+ net_layernorm: false
+ net_regularizer: 0
+ num_workers: 3
+ optimizer: adam
+ parallel_block_dim: 200
+ parallel_num_blocks: 8
+ pickle_feature_encoder: true
+ reduction_ratio: 0.1
+ save_best_only: true
+ seed: 2019
+ shuffle: true
+ task: binary_classification
+ use_features: null
+ verbose: 1
diff --git a/ranking/ctr/MaskNet/MaskNet_criteo_x4_001/README.md b/ranking/ctr/MaskNet/MaskNet_criteo_x4_001/README.md
new file mode 100644
index 00000000..5fb7ae3d
--- /dev/null
+++ b/ranking/ctr/MaskNet/MaskNet_criteo_x4_001/README.md
@@ -0,0 +1,243 @@
+## MaskNet_criteo_x4_001
+
+A hands-on guide to run the MaskNet model on the Criteo_x4 dataset.
+
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
+
+
+| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
+|:-----------------------------:|:-----------:|:--------:|:--------:|:-------:|
+### Environments
++ Hardware
+
+ ```python
+ CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
+ GPU: Tesla V100 32G
+ RAM: 755G
+
+ ```
+
++ Software
+
+ ```python
+ CUDA: 10.2
+ python: 3.7.10
+ pytorch: 1.10.2+cu102
+ pandas: 1.1.5
+ numpy: 1.19.5
+ scipy: 1.5.2
+ sklearn: 0.22.1
+ pyyaml: 6.0.1
+ h5py: 2.8.0
+ tqdm: 4.64.0
+ keras_preprocessing: 1.1.2
+ fuxictr: 2.2.0
+ ```
+
+### Dataset
+Please refer to [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4) to get the dataset details.
+
+### Code
+
+We use the [MaskNet](https://github.com/reczoo/FuxiCTR/tree/v2.2.0/model_zoo/MaskNet) model code from [FuxiCTR-v2.2.0](https://github.com/reczoo/FuxiCTR/tree/v2.2.0) for this experiment.
+
+Running steps:
+
+1. Download [FuxiCTR-v2.2.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.2.0.zip) and install all the dependencies listed in the [environments](#environments).
+
+ ```bash
+ pip uninstall fuxictr
+ pip install fuxictr==2.2.0
+ ```
+
+2. Create a data directory and put the downloaded data files in `../data/Criteo/Criteo_x4`.
+
+3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [MaskNet_criteo_x4_tuner_config_06](./MaskNet_criteo_x4_tuner_config_06). Please make sure that the data paths in `dataset_config.yaml` are correctly set.
+
+4. Run the following script to start training and evaluation.
+
+ ```bash
+ cd FuxiCTR/model_zoo/MaskNet
+ nohup python run_expid.py --config YOUR_PATH/MaskNet/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_018_ccc857cd --gpu 0 > run.log &
+ tail -f run.log
+ ```
+
+### Results
+
+| AUC | logloss |
+|:--------------------:|:--------------------:|
+| 0.813420 | 0.438748 |
+
+
+### Logs
+```python
+2024-02-23 11:34:31,352 P3402379 INFO Params: {
+ "batch_size": "10000",
+ "data_format": "csv",
+ "data_root": "../data/Criteo/",
+ "dataset_id": "criteo_x4_001_a5e05ce7",
+ "debug_mode": "False",
+ "dnn_hidden_activations": "relu",
+ "dnn_hidden_units": "[1000, 1000, 1000, 1000, 1000]",
+ "early_stop_patience": "2",
+ "emb_layernorm": "False",
+ "embedding_dim": "16",
+ "embedding_regularizer": "1e-05",
+ "epochs": "100",
+ "eval_steps": "None",
+ "feature_cols": "[{'active': True, 'dtype': 'float', 'fill_na': 0, 'name': ['I1', 'I2', 'I3', 'I4', 'I5', 'I6', 'I7', 'I8', 'I9', 'I10', 'I11', 'I12', 'I13'], 'preprocess': 'convert_to_bucket', 'type': 'categorical'}, {'active': True, 'dtype': 'str', 'fill_na': '', 'name': ['C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10', 'C11', 'C12', 'C13', 'C14', 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21', 'C22', 'C23', 'C24', 'C25', 'C26'], 'type': 'categorical'}]",
+ "feature_config": "None",
+ "feature_specs": "None",
+ "gpu": "1",
+ "group_id": "None",
+ "label_col": "{'dtype': 'float', 'name': 'Label'}",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "metrics": "['AUC', 'logloss']",
+ "min_categr_count": "10",
+ "model": "MaskNet",
+ "model_id": "MaskNet_criteo_x4_001_018_ccc857cd",
+ "model_root": "./checkpoints/",
+ "model_type": "ParallelMaskNet",
+ "monitor": "AUC",
+ "monitor_mode": "max",
+ "net_dropout": "0.3",
+ "net_layernorm": "False",
+ "net_regularizer": "0",
+ "num_workers": "3",
+ "optimizer": "adam",
+ "parallel_block_dim": "500",
+ "parallel_num_blocks": "2",
+ "pickle_feature_encoder": "True",
+ "reduction_ratio": "0.1",
+ "save_best_only": "True",
+ "seed": "2019",
+ "shuffle": "True",
+ "task": "binary_classification",
+ "test_data": "../data/Criteo/Criteo_x4/test.csv",
+ "train_data": "../data/Criteo/Criteo_x4/train.csv",
+ "use_features": "None",
+ "valid_data": "../data/Criteo/Criteo_x4/valid.csv",
+ "verbose": "1"
+}
+2024-02-23 11:34:31,353 P3402379 INFO Set up feature processor...
+2024-02-23 11:34:31,353 P3402379 WARNING Skip rebuilding ../data/Criteo/criteo_x4_001_a5e05ce7/feature_map.json. Please delete it manually if rebuilding is required.
+2024-02-23 11:34:31,353 P3402379 INFO Load feature_map from json: ../data/Criteo/criteo_x4_001_a5e05ce7/feature_map.json
+2024-02-23 11:34:31,354 P3402379 INFO Set column index...
+2024-02-23 11:34:31,354 P3402379 INFO Feature specs: {
+ "C1": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 1446}",
+ "C10": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 39530}",
+ "C11": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 5131}",
+ "C12": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 156656}",
+ "C13": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 3176}",
+ "C14": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 28}",
+ "C15": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 11043}",
+ "C16": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 148913}",
+ "C17": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 12}",
+ "C18": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 4560}",
+ "C19": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 2003}",
+ "C2": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 554}",
+ "C20": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 5}",
+ "C21": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 154564}",
+ "C22": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 18}",
+ "C23": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 17}",
+ "C24": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 53031}",
+ "C25": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 82}",
+ "C26": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 40955}",
+ "C3": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 157339}",
+ "C4": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 117822}",
+ "C5": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 306}",
+ "C6": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 18}",
+ "C7": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 11882}",
+ "C8": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 630}",
+ "C9": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 5}",
+ "I1": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 44}",
+ "I10": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 6}",
+ "I11": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 27}",
+ "I12": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 37}",
+ "I13": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 72}",
+ "I2": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 99}",
+ "I3": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 122}",
+ "I4": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 41}",
+ "I5": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 220}",
+ "I6": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 112}",
+ "I7": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 80}",
+ "I8": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 69}",
+ "I9": "{'source': '', 'type': 'categorical', 'padding_idx': 0, 'vocab_size': 92}"
+}
+2024-02-23 11:34:35,198 P3402379 INFO Total number of parameters: 20358077.
+2024-02-23 11:34:35,198 P3402379 INFO Loading datasets...
+2024-02-23 11:35:11,583 P3402379 INFO Train samples: total/36672493, blocks/1
+2024-02-23 11:35:16,175 P3402379 INFO Validation samples: total/4584062, blocks/1
+2024-02-23 11:35:16,175 P3402379 INFO Loading train and validation data done.
+2024-02-23 11:35:16,175 P3402379 INFO Start training: 3668 batches/epoch
+2024-02-23 11:35:16,175 P3402379 INFO ************ Epoch=1 start ************
+2024-02-23 11:40:13,140 P3402379 INFO Train loss: 0.459513
+2024-02-23 11:40:13,141 P3402379 INFO Evaluation @epoch 1 - batch 3668:
+2024-02-23 11:40:27,684 P3402379 INFO [Metrics] AUC: 0.806480
+2024-02-23 11:40:27,686 P3402379 INFO Save best model: monitor(max)=0.806480
+2024-02-23 11:40:27,864 P3402379 INFO ************ Epoch=1 end ************
+2024-02-23 11:45:24,583 P3402379 INFO Train loss: 0.452990
+2024-02-23 11:45:24,583 P3402379 INFO Evaluation @epoch 2 - batch 3668:
+2024-02-23 11:45:39,011 P3402379 INFO [Metrics] AUC: 0.808234
+2024-02-23 11:45:39,012 P3402379 INFO Save best model: monitor(max)=0.808234
+2024-02-23 11:45:39,189 P3402379 INFO ************ Epoch=2 end ************
+2024-02-23 11:50:35,804 P3402379 INFO Train loss: 0.451728
+2024-02-23 11:50:35,804 P3402379 INFO Evaluation @epoch 3 - batch 3668:
+2024-02-23 11:50:50,241 P3402379 INFO [Metrics] AUC: 0.809256
+2024-02-23 11:50:50,242 P3402379 INFO Save best model: monitor(max)=0.809256
+2024-02-23 11:50:50,429 P3402379 INFO ************ Epoch=3 end ************
+2024-02-23 11:55:47,287 P3402379 INFO Train loss: 0.451147
+2024-02-23 11:55:47,287 P3402379 INFO Evaluation @epoch 4 - batch 3668:
+2024-02-23 11:56:01,675 P3402379 INFO [Metrics] AUC: 0.809425
+2024-02-23 11:56:01,676 P3402379 INFO Save best model: monitor(max)=0.809425
+2024-02-23 11:56:01,848 P3402379 INFO ************ Epoch=4 end ************
+2024-02-23 12:00:59,781 P3402379 INFO Train loss: 0.450759
+2024-02-23 12:00:59,782 P3402379 INFO Evaluation @epoch 5 - batch 3668:
+2024-02-23 12:01:14,401 P3402379 INFO [Metrics] AUC: 0.809680
+2024-02-23 12:01:14,405 P3402379 INFO Save best model: monitor(max)=0.809680
+2024-02-23 12:01:14,596 P3402379 INFO ************ Epoch=5 end ************
+2024-02-23 12:06:12,225 P3402379 INFO Train loss: 0.450590
+2024-02-23 12:06:12,226 P3402379 INFO Evaluation @epoch 6 - batch 3668:
+2024-02-23 12:06:27,233 P3402379 INFO [Metrics] AUC: 0.809876
+2024-02-23 12:06:27,234 P3402379 INFO Save best model: monitor(max)=0.809876
+2024-02-23 12:06:27,415 P3402379 INFO ************ Epoch=6 end ************
+2024-02-23 12:11:25,413 P3402379 INFO Train loss: 0.450475
+2024-02-23 12:11:25,413 P3402379 INFO Evaluation @epoch 7 - batch 3668:
+2024-02-23 12:11:40,046 P3402379 INFO [Metrics] AUC: 0.809802
+2024-02-23 12:11:40,048 P3402379 INFO Monitor(max)=0.809802 STOP!
+2024-02-23 12:11:40,048 P3402379 INFO Reduce learning rate on plateau: 0.000100
+2024-02-23 12:11:40,098 P3402379 INFO ************ Epoch=7 end ************
+2024-02-23 12:16:36,465 P3402379 INFO Train loss: 0.440398
+2024-02-23 12:16:36,466 P3402379 INFO Evaluation @epoch 8 - batch 3668:
+2024-02-23 12:16:51,101 P3402379 INFO [Metrics] AUC: 0.812577
+2024-02-23 12:16:51,103 P3402379 INFO Save best model: monitor(max)=0.812577
+2024-02-23 12:16:51,276 P3402379 INFO ************ Epoch=8 end ************
+2024-02-23 12:21:47,033 P3402379 INFO Train loss: 0.436010
+2024-02-23 12:21:47,034 P3402379 INFO Evaluation @epoch 9 - batch 3668:
+2024-02-23 12:22:02,056 P3402379 INFO [Metrics] AUC: 0.812951
+2024-02-23 12:22:02,060 P3402379 INFO Save best model: monitor(max)=0.812951
+2024-02-23 12:22:02,238 P3402379 INFO ************ Epoch=9 end ************
+2024-02-23 12:26:56,632 P3402379 INFO Train loss: 0.433695
+2024-02-23 12:26:56,632 P3402379 INFO Evaluation @epoch 10 - batch 3668:
+2024-02-23 12:27:11,256 P3402379 INFO [Metrics] AUC: 0.812880
+2024-02-23 12:27:11,258 P3402379 INFO Monitor(max)=0.812880 STOP!
+2024-02-23 12:27:11,258 P3402379 INFO Reduce learning rate on plateau: 0.000010
+2024-02-23 12:27:11,308 P3402379 INFO ************ Epoch=10 end ************
+2024-02-23 12:32:05,896 P3402379 INFO Train loss: 0.430003
+2024-02-23 12:32:05,896 P3402379 INFO Evaluation @epoch 11 - batch 3668:
+2024-02-23 12:32:20,496 P3402379 INFO [Metrics] AUC: 0.812549
+2024-02-23 12:32:20,497 P3402379 INFO Monitor(max)=0.812549 STOP!
+2024-02-23 12:32:20,498 P3402379 INFO Reduce learning rate on plateau: 0.000001
+2024-02-23 12:32:20,498 P3402379 INFO ********* Epoch==11 early stop *********
+2024-02-23 12:32:20,560 P3402379 INFO Training finished.
+2024-02-23 12:32:20,560 P3402379 INFO Load best model: /cache/FuxiCTR/benchmark/checkpoints/criteo_x4_001_a5e05ce7/MaskNet_criteo_x4_001_018_ccc857cd.model
+2024-02-23 12:32:20,644 P3402379 INFO ****** Validation evaluation ******
+2024-02-23 12:32:36,461 P3402379 INFO [Metrics] AUC: 0.812951 - logloss: 0.439143
+2024-02-23 12:32:36,559 P3402379 INFO ******** Test evaluation ********
+2024-02-23 12:32:36,559 P3402379 INFO Loading datasets...
+2024-02-23 12:32:41,178 P3402379 INFO Test samples: total/4584062, blocks/1
+2024-02-23 12:32:41,178 P3402379 INFO Loading test data done.
+2024-02-23 12:32:57,223 P3402379 INFO [Metrics] AUC: 0.813420 - logloss: 0.438748
+
+```
diff --git a/ranking/ctr/MaskNet/MaskNet_criteo_x4_001/environments.txt b/ranking/ctr/MaskNet/MaskNet_criteo_x4_001/environments.txt
new file mode 100644
index 00000000..b4567ace
--- /dev/null
+++ b/ranking/ctr/MaskNet/MaskNet_criteo_x4_001/environments.txt
@@ -0,0 +1,18 @@
+[Hardware]
+CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
+GPU: Tesla V100 32G
+RAM: 755G
+
+[Software]
+CUDA: 10.2
+python: 3.7.10
+pytorch: 1.10.2+cu102
+pandas: 1.1.5
+numpy: 1.19.5
+scipy: 1.5.2
+sklearn: 0.22.1
+pyyaml: 6.0.1
+h5py: 2.8.0
+tqdm: 4.64.0
+keras_preprocessing: 1.1.2
+fuxictr: 2.2.0
\ No newline at end of file
diff --git a/ranking/ctr/MaskNet/MaskNet_criteo_x4_001/results.csv b/ranking/ctr/MaskNet/MaskNet_criteo_x4_001/results.csv
new file mode 100644
index 00000000..4bfc6578
--- /dev/null
+++ b/ranking/ctr/MaskNet/MaskNet_criteo_x4_001/results.csv
@@ -0,0 +1 @@
+ 20240223-123257,[command] python run_expid.py --config Criteo_x4/MaskNet_criteo_x4_001/MaskNet_criteo_x4_tuner_config_06 --expid MaskNet_criteo_x4_001_018_ccc857cd --gpu 1,[exp_id] MaskNet_criteo_x4_001_018_ccc857cd,[dataset_id] criteo_x4_001_a5e05ce7,[train] N.A.,[val] AUC: 0.812951 - logloss: 0.439143,[test] AUC: 0.813420 - logloss: 0.438748
diff --git a/ranking/ctr/MaskNet/MaskNet_frappe_x1/README.md b/ranking/ctr/MaskNet/MaskNet_frappe_x1/README.md
index ed24bce7..ba9e9c86 100644
--- a/ranking/ctr/MaskNet/MaskNet_frappe_x1/README.md
+++ b/ranking/ctr/MaskNet/MaskNet_frappe_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the MaskNet model on the Frappe_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -38,11 +38,11 @@ Dataset ID: [Frappe_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pr
### Code
-We use [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [MaskNet](https://github.com/xue-pai/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/MaskNet.py).
+We use [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [MaskNet](https://github.com/reczoo/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/MaskNet.py).
Running steps:
-1. Download [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/MaskNet/MaskNet_movielenslatest_x1/README.md b/ranking/ctr/MaskNet/MaskNet_movielenslatest_x1/README.md
index 284e8cdb..5b77dac5 100644
--- a/ranking/ctr/MaskNet/MaskNet_movielenslatest_x1/README.md
+++ b/ranking/ctr/MaskNet/MaskNet_movielenslatest_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the MaskNet model on the MovielensLatest_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -38,11 +38,11 @@ Dataset ID: [MovielensLatest_x1](https://github.com/openbenchmark/BARS/blob/mast
### Code
-We use [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [MaskNet](https://github.com/xue-pai/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/MaskNet.py).
+We use [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [MaskNet](https://github.com/reczoo/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/MaskNet.py).
Running steps:
-1. Download [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/NFM/NFM_avazu_x1/README.md b/ranking/ctr/NFM/NFM_avazu_x1/README.md
index 5ab91f77..063885ec 100644
--- a/ranking/ctr/NFM/NFM_avazu_x1/README.md
+++ b/ranking/ctr/NFM/NFM_avazu_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the NFM model on the Avazu_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x1](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [NFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/NFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [NFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/NFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/NFM/NFM_avazu_x4_001/README.md b/ranking/ctr/NFM/NFM_avazu_x4_001/README.md
index 85cfcbd5..358b9204 100644
--- a/ranking/ctr/NFM/NFM_avazu_x4_001/README.md
+++ b/ranking/ctr/NFM/NFM_avazu_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the NFM model on the Avazu_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [NFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/NFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [NFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/NFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/NFM/NFM_avazu_x4_002/README.md b/ranking/ctr/NFM/NFM_avazu_x4_002/README.md
index d720a57d..98d2445d 100644
--- a/ranking/ctr/NFM/NFM_avazu_x4_002/README.md
+++ b/ranking/ctr/NFM/NFM_avazu_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the NFM model on the Avazu_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [NFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/NFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [NFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/NFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/NFM/NFM_criteo_x1/README.md b/ranking/ctr/NFM/NFM_criteo_x1/README.md
index f59ea96a..0e0df356 100644
--- a/ranking/ctr/NFM/NFM_criteo_x1/README.md
+++ b/ranking/ctr/NFM/NFM_criteo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the NFM model on the Criteo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x1](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [NFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/NFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [NFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/NFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/NFM/NFM_criteo_x4_001/README.md b/ranking/ctr/NFM/NFM_criteo_x4_001/README.md
index 5644c520..1988f8eb 100644
--- a/ranking/ctr/NFM/NFM_criteo_x4_001/README.md
+++ b/ranking/ctr/NFM/NFM_criteo_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the NFM model on the Criteo_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [NFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/NFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [NFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/NFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/NFM/NFM_criteo_x4_002/README.md b/ranking/ctr/NFM/NFM_criteo_x4_002/README.md
index d4cd55cb..2ca773b4 100644
--- a/ranking/ctr/NFM/NFM_criteo_x4_002/README.md
+++ b/ranking/ctr/NFM/NFM_criteo_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the NFM model on the Criteo_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [NFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/NFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [NFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/NFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/NFM/NFM_frappe_x1/README.md b/ranking/ctr/NFM/NFM_frappe_x1/README.md
index 91c87ea5..ac38996b 100644
--- a/ranking/ctr/NFM/NFM_frappe_x1/README.md
+++ b/ranking/ctr/NFM/NFM_frappe_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the NFM model on the Frappe_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Frappe_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Frappe/README.md#Frappe_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Frappe_x1](https://github.com/reczoo/Datasets/tree/main/Frappe/Frappe_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [NFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/NFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [NFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/NFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/NFM/NFM_kkbox_x1/README.md b/ranking/ctr/NFM/NFM_kkbox_x1/README.md
index e5bd7fe2..721180ce 100644
--- a/ranking/ctr/NFM/NFM_kkbox_x1/README.md
+++ b/ranking/ctr/NFM/NFM_kkbox_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the NFM model on the KKBox_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -38,11 +38,11 @@ Dataset ID: [KKBox_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pre
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [NFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/NFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [NFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/NFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/NFM/NFM_movielenslatest_x1/README.md b/ranking/ctr/NFM/NFM_movielenslatest_x1/README.md
index 5b12cd36..00a038c5 100644
--- a/ranking/ctr/NFM/NFM_movielenslatest_x1/README.md
+++ b/ranking/ctr/NFM/NFM_movielenslatest_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the NFM model on the Movielenslatest_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Movielenslatest_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/MovieLens/README.md#Movielenslatest_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [MovielensLatest_x1](https://github.com/reczoo/Datasets/tree/main/MovieLens/MovielensLatest_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [NFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/NFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [NFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/NFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/ONN/ONN_avazu_x4_001/README.md b/ranking/ctr/ONN/ONN_avazu_x4_001/README.md
index 5e59bb55..233a0e54 100644
--- a/ranking/ctr/ONN/ONN_avazu_x4_001/README.md
+++ b/ranking/ctr/ONN/ONN_avazu_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the ONN model on the Avazu_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [ONN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/ONN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [ONN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/ONN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/ONN/ONN_avazu_x4_002/README.md b/ranking/ctr/ONN/ONN_avazu_x4_002/README.md
index 1f62e269..ab72138f 100644
--- a/ranking/ctr/ONN/ONN_avazu_x4_002/README.md
+++ b/ranking/ctr/ONN/ONN_avazu_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the ONN model on the Avazu_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [ONN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/ONN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [ONN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/ONN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/ONN/ONN_criteo_x4_001/README.md b/ranking/ctr/ONN/ONN_criteo_x4_001/README.md
index 80bc0c6c..f43716ab 100644
--- a/ranking/ctr/ONN/ONN_criteo_x4_001/README.md
+++ b/ranking/ctr/ONN/ONN_criteo_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the ONN model on the Criteo_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [ONN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/ONN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [ONN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/ONN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/ONN/ONN_criteo_x4_002/README.md b/ranking/ctr/ONN/ONN_criteo_x4_002/README.md
index 75fea119..467781e5 100644
--- a/ranking/ctr/ONN/ONN_criteo_x4_002/README.md
+++ b/ranking/ctr/ONN/ONN_criteo_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the ONN model on the Criteo_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [ONN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/ONN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [ONN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/ONN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/ONN/ONN_kkbox_x1/README.md b/ranking/ctr/ONN/ONN_kkbox_x1/README.md
index 146dca29..4a4dcdea 100644
--- a/ranking/ctr/ONN/ONN_kkbox_x1/README.md
+++ b/ranking/ctr/ONN/ONN_kkbox_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the ONN model on the KKBox_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -38,11 +38,11 @@ Dataset ID: [KKBox_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pre
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [ONN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/ONN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [ONN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/ONN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/PNN/IPNN_avazu_x1/README.md b/ranking/ctr/PNN/IPNN_avazu_x1/README.md
index 0238cd67..a5814fba 100644
--- a/ranking/ctr/PNN/IPNN_avazu_x1/README.md
+++ b/ranking/ctr/PNN/IPNN_avazu_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the PNN model on the Avazu_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x1](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [PNN](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/PNN.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [PNN](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/PNN.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/PNN/IPNN_avazu_x4_001/README.md b/ranking/ctr/PNN/IPNN_avazu_x4_001/README.md
index 5ec785f9..1cfc7e90 100644
--- a/ranking/ctr/PNN/IPNN_avazu_x4_001/README.md
+++ b/ranking/ctr/PNN/IPNN_avazu_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the PNN model on the Avazu_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [PNN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/PNN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [PNN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/PNN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/PNN/IPNN_avazu_x4_002/README.md b/ranking/ctr/PNN/IPNN_avazu_x4_002/README.md
index 3ff3c912..e5408599 100644
--- a/ranking/ctr/PNN/IPNN_avazu_x4_002/README.md
+++ b/ranking/ctr/PNN/IPNN_avazu_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the PNN model on the Avazu_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [PNN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/PNN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [PNN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/PNN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/PNN/IPNN_criteo_x1/README.md b/ranking/ctr/PNN/IPNN_criteo_x1/README.md
index e30c5f8f..1e2df267 100644
--- a/ranking/ctr/PNN/IPNN_criteo_x1/README.md
+++ b/ranking/ctr/PNN/IPNN_criteo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the PNN model on the Criteo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x1](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [PNN](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/PNN.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [PNN](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/PNN.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/PNN/IPNN_criteo_x4_001/README.md b/ranking/ctr/PNN/IPNN_criteo_x4_001/README.md
index 299f9bd1..6bf9d158 100644
--- a/ranking/ctr/PNN/IPNN_criteo_x4_001/README.md
+++ b/ranking/ctr/PNN/IPNN_criteo_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the PNN model on the Criteo_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [PNN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/PNN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [PNN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/PNN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/PNN/IPNN_criteo_x4_002/README.md b/ranking/ctr/PNN/IPNN_criteo_x4_002/README.md
index a931a284..d205e376 100644
--- a/ranking/ctr/PNN/IPNN_criteo_x4_002/README.md
+++ b/ranking/ctr/PNN/IPNN_criteo_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the PNN model on the Criteo_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [PNN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/PNN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [PNN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/PNN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/PNN/IPNN_frappe_x1/README.md b/ranking/ctr/PNN/IPNN_frappe_x1/README.md
index ab2fa264..07aade50 100644
--- a/ranking/ctr/PNN/IPNN_frappe_x1/README.md
+++ b/ranking/ctr/PNN/IPNN_frappe_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the PNN model on the Frappe_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Frappe_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Frappe/README.md#Frappe_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Frappe_x1](https://github.com/reczoo/Datasets/tree/main/Frappe/Frappe_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [PNN](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/PNN.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [PNN](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/PNN.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/PNN/IPNN_kkbox_x1/README.md b/ranking/ctr/PNN/IPNN_kkbox_x1/README.md
index 97bc5bbe..67637b64 100644
--- a/ranking/ctr/PNN/IPNN_kkbox_x1/README.md
+++ b/ranking/ctr/PNN/IPNN_kkbox_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the PNN model on the KKBox_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -38,11 +38,11 @@ Dataset ID: [KKBox_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pre
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [PNN](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/PNN.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [PNN](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/PNN.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/PNN/IPNN_movielenslatest_x1/README.md b/ranking/ctr/PNN/IPNN_movielenslatest_x1/README.md
index 5dc88d9c..7585d47e 100644
--- a/ranking/ctr/PNN/IPNN_movielenslatest_x1/README.md
+++ b/ranking/ctr/PNN/IPNN_movielenslatest_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the PNN model on the Movielenslatest_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Movielenslatest_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/MovieLens/README.md#Movielenslatest_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [MovielensLatest_x1](https://github.com/reczoo/Datasets/tree/main/MovieLens/MovielensLatest_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [PNN](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/PNN.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [PNN](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/PNN.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/SAM/SAM_avazu_x1/README.md b/ranking/ctr/SAM/SAM_avazu_x1/README.md
index 5bf4f85c..1fb41c49 100644
--- a/ranking/ctr/SAM/SAM_avazu_x1/README.md
+++ b/ranking/ctr/SAM/SAM_avazu_x1/README.md
@@ -1,187 +1,187 @@
-## SAM_avazu_x1
-
-A hands-on guide to run the SAM model on the Avazu_x1 dataset.
-
-Author: [XUEPAI](https://github.com/xue-pai)
-
-### Index
-[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
-
-### Environments
-+ Hardware
-
- ```python
- CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
- GPU: Tesla V100 32G
- RAM: 755G
-
- ```
-
-+ Software
-
- ```python
- CUDA: 10.2
- python: 3.6.4
- pytorch: 1.0.0
- pandas: 0.22.0
- numpy: 1.19.2
- scipy: 1.5.4
- sklearn: 0.22.1
- pyyaml: 5.4.1
- h5py: 2.8.0
- tqdm: 4.60.0
- fuxictr: 1.2.1
-
- ```
-
-### Dataset
-Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu#Avazu_x1). Please refer to the dataset details to get data ready.
-
-### Code
-
-We use [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [SAM](https://github.com/xue-pai/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/SAM.py).
-
-Running steps:
-
-1. Download [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
-
- ```python
- sys.path.append('YOUR_PATH_TO_FuxiCTR/')
- ```
-
-2. Create a data directory and put the downloaded csv files in `../data/Avazu/Avazu_x1`.
-
-3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [SAM_avazu_x1_tuner_config_01](./SAM_avazu_x1_tuner_config_01). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
-
-4. Run the following script to start.
-
- ```bash
- cd SAM_avazu_x1
- nohup python run_expid.py --config ./SAM_avazu_x1_tuner_config_01 --expid SAM_avazu_x1_012_1cecba8c --gpu 0 > run.log &
- tail -f run.log
- ```
-
-### Results
-
-| AUC | logloss |
-|:--------------------:|:--------------------:|
-| 0.763179 | 0.367202 |
-
-
-### Logs
-```python
-2022-05-28 11:54:50,811 P80006 INFO {
- "aggregation": "concat",
- "batch_size": "4096",
- "data_format": "csv",
- "data_root": "../data/Avazu/",
- "dataset_id": "avazu_x1_3fb65689",
- "debug": "False",
- "embedding_dim": "10",
- "embedding_regularizer": "0.01",
- "epochs": "100",
- "every_x_epochs": "1",
- "feature_cols": "[{'active': True, 'dtype': 'float', 'name': ['feat_1', 'feat_2', 'feat_3', 'feat_4', 'feat_5', 'feat_6', 'feat_7', 'feat_8', 'feat_9', 'feat_10', 'feat_11', 'feat_12', 'feat_13', 'feat_14', 'feat_15', 'feat_16', 'feat_17', 'feat_18', 'feat_19', 'feat_20', 'feat_21', 'feat_22'], 'type': 'categorical'}]",
- "gpu": "3",
- "interaction_type": "SAM3A",
- "label_col": "{'dtype': 'float', 'name': 'label'}",
- "learning_rate": "0.001",
- "loss": "binary_crossentropy",
- "metrics": "['AUC', 'logloss']",
- "min_categr_count": "1",
- "model": "SAM",
- "model_id": "SAM_avazu_x1_012_1cecba8c",
- "model_root": "./Avazu/SAM_avazu_x1/",
- "monitor": "AUC",
- "monitor_mode": "max",
- "net_dropout": "0.3",
- "net_regularizer": "0",
- "num_interaction_layers": "4",
- "num_workers": "3",
- "optimizer": "adam",
- "patience": "2",
- "pickle_feature_encoder": "True",
- "save_best_only": "True",
- "seed": "2021",
- "shuffle": "True",
- "task": "binary_classification",
- "test_data": "../data/Avazu/Avazu_x1/test.csv",
- "train_data": "../data/Avazu/Avazu_x1/train.csv",
- "use_hdf5": "True",
- "use_residual": "True",
- "valid_data": "../data/Avazu/Avazu_x1/valid.csv",
- "verbose": "0",
- "version": "pytorch"
-}
-2022-05-28 11:54:50,812 P80006 INFO Set up feature encoder...
-2022-05-28 11:54:50,812 P80006 INFO Load feature_map from json: ../data/Avazu/avazu_x1_3fb65689/feature_map.json
-2022-05-28 11:54:50,812 P80006 INFO Loading data...
-2022-05-28 11:54:50,813 P80006 INFO Loading data from h5: ../data/Avazu/avazu_x1_3fb65689/train.h5
-2022-05-28 11:54:53,353 P80006 INFO Loading data from h5: ../data/Avazu/avazu_x1_3fb65689/valid.h5
-2022-05-28 11:54:53,740 P80006 INFO Train samples: total/28300276, pos/4953382, neg/23346894, ratio/17.50%, blocks/1
-2022-05-28 11:54:53,740 P80006 INFO Validation samples: total/4042897, pos/678699, neg/3364198, ratio/16.79%, blocks/1
-2022-05-28 11:54:53,740 P80006 INFO Loading train data done.
-2022-05-28 11:55:00,146 P80006 INFO Total number of parameters: 13006371.
-2022-05-28 11:55:00,146 P80006 INFO Start training: 6910 batches/epoch
-2022-05-28 11:55:00,146 P80006 INFO ************ Epoch=1 start ************
-2022-05-28 12:05:05,187 P80006 INFO [Metrics] AUC: 0.736598 - logloss: 0.403137
-2022-05-28 12:05:05,191 P80006 INFO Save best model: monitor(max): 0.736598
-2022-05-28 12:05:05,477 P80006 INFO --- 6910/6910 batches finished ---
-2022-05-28 12:05:05,520 P80006 INFO Train loss: 0.432452
-2022-05-28 12:05:05,520 P80006 INFO ************ Epoch=1 end ************
-2022-05-28 12:15:07,944 P80006 INFO [Metrics] AUC: 0.738395 - logloss: 0.400699
-2022-05-28 12:15:07,946 P80006 INFO Save best model: monitor(max): 0.738395
-2022-05-28 12:15:08,009 P80006 INFO --- 6910/6910 batches finished ---
-2022-05-28 12:15:08,055 P80006 INFO Train loss: 0.428933
-2022-05-28 12:15:08,055 P80006 INFO ************ Epoch=2 end ************
-2022-05-28 12:25:10,134 P80006 INFO [Metrics] AUC: 0.736692 - logloss: 0.400102
-2022-05-28 12:25:10,137 P80006 INFO Monitor(max) STOP: 0.736692 !
-2022-05-28 12:25:10,137 P80006 INFO Reduce learning rate on plateau: 0.000100
-2022-05-28 12:25:10,137 P80006 INFO --- 6910/6910 batches finished ---
-2022-05-28 12:25:10,190 P80006 INFO Train loss: 0.429232
-2022-05-28 12:25:10,190 P80006 INFO ************ Epoch=3 end ************
-2022-05-28 12:35:10,171 P80006 INFO [Metrics] AUC: 0.744484 - logloss: 0.396703
-2022-05-28 12:35:10,173 P80006 INFO Save best model: monitor(max): 0.744484
-2022-05-28 12:35:10,236 P80006 INFO --- 6910/6910 batches finished ---
-2022-05-28 12:35:10,282 P80006 INFO Train loss: 0.406561
-2022-05-28 12:35:10,282 P80006 INFO ************ Epoch=4 end ************
-2022-05-28 12:45:08,190 P80006 INFO [Metrics] AUC: 0.745908 - logloss: 0.396222
-2022-05-28 12:45:08,193 P80006 INFO Save best model: monitor(max): 0.745908
-2022-05-28 12:45:08,260 P80006 INFO --- 6910/6910 batches finished ---
-2022-05-28 12:45:08,308 P80006 INFO Train loss: 0.406881
-2022-05-28 12:45:08,309 P80006 INFO ************ Epoch=5 end ************
-2022-05-28 12:55:05,921 P80006 INFO [Metrics] AUC: 0.745192 - logloss: 0.396466
-2022-05-28 12:55:05,923 P80006 INFO Monitor(max) STOP: 0.745192 !
-2022-05-28 12:55:05,923 P80006 INFO Reduce learning rate on plateau: 0.000010
-2022-05-28 12:55:05,923 P80006 INFO --- 6910/6910 batches finished ---
-2022-05-28 12:55:05,970 P80006 INFO Train loss: 0.407527
-2022-05-28 12:55:05,970 P80006 INFO ************ Epoch=6 end ************
-2022-05-28 13:05:01,874 P80006 INFO [Metrics] AUC: 0.747762 - logloss: 0.395219
-2022-05-28 13:05:01,876 P80006 INFO Save best model: monitor(max): 0.747762
-2022-05-28 13:05:01,948 P80006 INFO --- 6910/6910 batches finished ---
-2022-05-28 13:05:02,009 P80006 INFO Train loss: 0.398262
-2022-05-28 13:05:02,009 P80006 INFO ************ Epoch=7 end ************
-2022-05-28 13:14:42,182 P80006 INFO [Metrics] AUC: 0.745087 - logloss: 0.396685
-2022-05-28 13:14:42,184 P80006 INFO Monitor(max) STOP: 0.745087 !
-2022-05-28 13:14:42,184 P80006 INFO Reduce learning rate on plateau: 0.000001
-2022-05-28 13:14:42,184 P80006 INFO --- 6910/6910 batches finished ---
-2022-05-28 13:14:42,238 P80006 INFO Train loss: 0.394946
-2022-05-28 13:14:42,238 P80006 INFO ************ Epoch=8 end ************
-2022-05-28 13:24:50,543 P80006 INFO [Metrics] AUC: 0.744029 - logloss: 0.397360
-2022-05-28 13:24:50,545 P80006 INFO Monitor(max) STOP: 0.744029 !
-2022-05-28 13:24:50,545 P80006 INFO Reduce learning rate on plateau: 0.000001
-2022-05-28 13:24:50,545 P80006 INFO Early stopping at epoch=9
-2022-05-28 13:24:50,545 P80006 INFO --- 6910/6910 batches finished ---
-2022-05-28 13:24:50,600 P80006 INFO Train loss: 0.390888
-2022-05-28 13:24:50,600 P80006 INFO Training finished.
-2022-05-28 13:24:50,600 P80006 INFO Load best model: /cache/FuxiCTR/benchmarks/Avazu/SAM_avazu_x1/avazu_x1_3fb65689/SAM_avazu_x1_012_1cecba8c.model
-2022-05-28 13:24:54,920 P80006 INFO ****** Validation evaluation ******
-2022-05-28 13:25:08,572 P80006 INFO [Metrics] AUC: 0.747762 - logloss: 0.395219
-2022-05-28 13:25:08,651 P80006 INFO ******** Test evaluation ********
-2022-05-28 13:25:08,651 P80006 INFO Loading data...
-2022-05-28 13:25:08,652 P80006 INFO Loading data from h5: ../data/Avazu/avazu_x1_3fb65689/test.h5
-2022-05-28 13:25:09,526 P80006 INFO Test samples: total/8085794, pos/1232985, neg/6852809, ratio/15.25%, blocks/1
-2022-05-28 13:25:09,526 P80006 INFO Loading test data done.
-2022-05-28 13:25:37,331 P80006 INFO [Metrics] AUC: 0.763179 - logloss: 0.367202
-
-```
+## SAM_avazu_x1
+
+A hands-on guide to run the SAM model on the Avazu_x1 dataset.
+
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
+
+### Index
+[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
+
+### Environments
++ Hardware
+
+ ```python
+ CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
+ GPU: Tesla V100 32G
+ RAM: 755G
+
+ ```
+
++ Software
+
+ ```python
+ CUDA: 10.2
+ python: 3.6.4
+ pytorch: 1.0.0
+ pandas: 0.22.0
+ numpy: 1.19.2
+ scipy: 1.5.4
+ sklearn: 0.22.1
+ pyyaml: 5.4.1
+ h5py: 2.8.0
+ tqdm: 4.60.0
+ fuxictr: 1.2.1
+
+ ```
+
+### Dataset
+Dataset ID: [Avazu_x1](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x1). Please refer to the dataset details to get data ready.
+
+### Code
+
+We use [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [SAM](https://github.com/reczoo/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/SAM.py).
+
+Running steps:
+
+1. Download [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+
+ ```python
+ sys.path.append('YOUR_PATH_TO_FuxiCTR/')
+ ```
+
+2. Create a data directory and put the downloaded csv files in `../data/Avazu/Avazu_x1`.
+
+3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [SAM_avazu_x1_tuner_config_01](./SAM_avazu_x1_tuner_config_01). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
+
+4. Run the following script to start.
+
+ ```bash
+ cd SAM_avazu_x1
+ nohup python run_expid.py --config ./SAM_avazu_x1_tuner_config_01 --expid SAM_avazu_x1_012_1cecba8c --gpu 0 > run.log &
+ tail -f run.log
+ ```
+
+### Results
+
+| AUC | logloss |
+|:--------------------:|:--------------------:|
+| 0.763179 | 0.367202 |
+
+
+### Logs
+```python
+2022-05-28 11:54:50,811 P80006 INFO {
+ "aggregation": "concat",
+ "batch_size": "4096",
+ "data_format": "csv",
+ "data_root": "../data/Avazu/",
+ "dataset_id": "avazu_x1_3fb65689",
+ "debug": "False",
+ "embedding_dim": "10",
+ "embedding_regularizer": "0.01",
+ "epochs": "100",
+ "every_x_epochs": "1",
+ "feature_cols": "[{'active': True, 'dtype': 'float', 'name': ['feat_1', 'feat_2', 'feat_3', 'feat_4', 'feat_5', 'feat_6', 'feat_7', 'feat_8', 'feat_9', 'feat_10', 'feat_11', 'feat_12', 'feat_13', 'feat_14', 'feat_15', 'feat_16', 'feat_17', 'feat_18', 'feat_19', 'feat_20', 'feat_21', 'feat_22'], 'type': 'categorical'}]",
+ "gpu": "3",
+ "interaction_type": "SAM3A",
+ "label_col": "{'dtype': 'float', 'name': 'label'}",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "metrics": "['AUC', 'logloss']",
+ "min_categr_count": "1",
+ "model": "SAM",
+ "model_id": "SAM_avazu_x1_012_1cecba8c",
+ "model_root": "./Avazu/SAM_avazu_x1/",
+ "monitor": "AUC",
+ "monitor_mode": "max",
+ "net_dropout": "0.3",
+ "net_regularizer": "0",
+ "num_interaction_layers": "4",
+ "num_workers": "3",
+ "optimizer": "adam",
+ "patience": "2",
+ "pickle_feature_encoder": "True",
+ "save_best_only": "True",
+ "seed": "2021",
+ "shuffle": "True",
+ "task": "binary_classification",
+ "test_data": "../data/Avazu/Avazu_x1/test.csv",
+ "train_data": "../data/Avazu/Avazu_x1/train.csv",
+ "use_hdf5": "True",
+ "use_residual": "True",
+ "valid_data": "../data/Avazu/Avazu_x1/valid.csv",
+ "verbose": "0",
+ "version": "pytorch"
+}
+2022-05-28 11:54:50,812 P80006 INFO Set up feature encoder...
+2022-05-28 11:54:50,812 P80006 INFO Load feature_map from json: ../data/Avazu/avazu_x1_3fb65689/feature_map.json
+2022-05-28 11:54:50,812 P80006 INFO Loading data...
+2022-05-28 11:54:50,813 P80006 INFO Loading data from h5: ../data/Avazu/avazu_x1_3fb65689/train.h5
+2022-05-28 11:54:53,353 P80006 INFO Loading data from h5: ../data/Avazu/avazu_x1_3fb65689/valid.h5
+2022-05-28 11:54:53,740 P80006 INFO Train samples: total/28300276, pos/4953382, neg/23346894, ratio/17.50%, blocks/1
+2022-05-28 11:54:53,740 P80006 INFO Validation samples: total/4042897, pos/678699, neg/3364198, ratio/16.79%, blocks/1
+2022-05-28 11:54:53,740 P80006 INFO Loading train data done.
+2022-05-28 11:55:00,146 P80006 INFO Total number of parameters: 13006371.
+2022-05-28 11:55:00,146 P80006 INFO Start training: 6910 batches/epoch
+2022-05-28 11:55:00,146 P80006 INFO ************ Epoch=1 start ************
+2022-05-28 12:05:05,187 P80006 INFO [Metrics] AUC: 0.736598 - logloss: 0.403137
+2022-05-28 12:05:05,191 P80006 INFO Save best model: monitor(max): 0.736598
+2022-05-28 12:05:05,477 P80006 INFO --- 6910/6910 batches finished ---
+2022-05-28 12:05:05,520 P80006 INFO Train loss: 0.432452
+2022-05-28 12:05:05,520 P80006 INFO ************ Epoch=1 end ************
+2022-05-28 12:15:07,944 P80006 INFO [Metrics] AUC: 0.738395 - logloss: 0.400699
+2022-05-28 12:15:07,946 P80006 INFO Save best model: monitor(max): 0.738395
+2022-05-28 12:15:08,009 P80006 INFO --- 6910/6910 batches finished ---
+2022-05-28 12:15:08,055 P80006 INFO Train loss: 0.428933
+2022-05-28 12:15:08,055 P80006 INFO ************ Epoch=2 end ************
+2022-05-28 12:25:10,134 P80006 INFO [Metrics] AUC: 0.736692 - logloss: 0.400102
+2022-05-28 12:25:10,137 P80006 INFO Monitor(max) STOP: 0.736692 !
+2022-05-28 12:25:10,137 P80006 INFO Reduce learning rate on plateau: 0.000100
+2022-05-28 12:25:10,137 P80006 INFO --- 6910/6910 batches finished ---
+2022-05-28 12:25:10,190 P80006 INFO Train loss: 0.429232
+2022-05-28 12:25:10,190 P80006 INFO ************ Epoch=3 end ************
+2022-05-28 12:35:10,171 P80006 INFO [Metrics] AUC: 0.744484 - logloss: 0.396703
+2022-05-28 12:35:10,173 P80006 INFO Save best model: monitor(max): 0.744484
+2022-05-28 12:35:10,236 P80006 INFO --- 6910/6910 batches finished ---
+2022-05-28 12:35:10,282 P80006 INFO Train loss: 0.406561
+2022-05-28 12:35:10,282 P80006 INFO ************ Epoch=4 end ************
+2022-05-28 12:45:08,190 P80006 INFO [Metrics] AUC: 0.745908 - logloss: 0.396222
+2022-05-28 12:45:08,193 P80006 INFO Save best model: monitor(max): 0.745908
+2022-05-28 12:45:08,260 P80006 INFO --- 6910/6910 batches finished ---
+2022-05-28 12:45:08,308 P80006 INFO Train loss: 0.406881
+2022-05-28 12:45:08,309 P80006 INFO ************ Epoch=5 end ************
+2022-05-28 12:55:05,921 P80006 INFO [Metrics] AUC: 0.745192 - logloss: 0.396466
+2022-05-28 12:55:05,923 P80006 INFO Monitor(max) STOP: 0.745192 !
+2022-05-28 12:55:05,923 P80006 INFO Reduce learning rate on plateau: 0.000010
+2022-05-28 12:55:05,923 P80006 INFO --- 6910/6910 batches finished ---
+2022-05-28 12:55:05,970 P80006 INFO Train loss: 0.407527
+2022-05-28 12:55:05,970 P80006 INFO ************ Epoch=6 end ************
+2022-05-28 13:05:01,874 P80006 INFO [Metrics] AUC: 0.747762 - logloss: 0.395219
+2022-05-28 13:05:01,876 P80006 INFO Save best model: monitor(max): 0.747762
+2022-05-28 13:05:01,948 P80006 INFO --- 6910/6910 batches finished ---
+2022-05-28 13:05:02,009 P80006 INFO Train loss: 0.398262
+2022-05-28 13:05:02,009 P80006 INFO ************ Epoch=7 end ************
+2022-05-28 13:14:42,182 P80006 INFO [Metrics] AUC: 0.745087 - logloss: 0.396685
+2022-05-28 13:14:42,184 P80006 INFO Monitor(max) STOP: 0.745087 !
+2022-05-28 13:14:42,184 P80006 INFO Reduce learning rate on plateau: 0.000001
+2022-05-28 13:14:42,184 P80006 INFO --- 6910/6910 batches finished ---
+2022-05-28 13:14:42,238 P80006 INFO Train loss: 0.394946
+2022-05-28 13:14:42,238 P80006 INFO ************ Epoch=8 end ************
+2022-05-28 13:24:50,543 P80006 INFO [Metrics] AUC: 0.744029 - logloss: 0.397360
+2022-05-28 13:24:50,545 P80006 INFO Monitor(max) STOP: 0.744029 !
+2022-05-28 13:24:50,545 P80006 INFO Reduce learning rate on plateau: 0.000001
+2022-05-28 13:24:50,545 P80006 INFO Early stopping at epoch=9
+2022-05-28 13:24:50,545 P80006 INFO --- 6910/6910 batches finished ---
+2022-05-28 13:24:50,600 P80006 INFO Train loss: 0.390888
+2022-05-28 13:24:50,600 P80006 INFO Training finished.
+2022-05-28 13:24:50,600 P80006 INFO Load best model: /cache/FuxiCTR/benchmarks/Avazu/SAM_avazu_x1/avazu_x1_3fb65689/SAM_avazu_x1_012_1cecba8c.model
+2022-05-28 13:24:54,920 P80006 INFO ****** Validation evaluation ******
+2022-05-28 13:25:08,572 P80006 INFO [Metrics] AUC: 0.747762 - logloss: 0.395219
+2022-05-28 13:25:08,651 P80006 INFO ******** Test evaluation ********
+2022-05-28 13:25:08,651 P80006 INFO Loading data...
+2022-05-28 13:25:08,652 P80006 INFO Loading data from h5: ../data/Avazu/avazu_x1_3fb65689/test.h5
+2022-05-28 13:25:09,526 P80006 INFO Test samples: total/8085794, pos/1232985, neg/6852809, ratio/15.25%, blocks/1
+2022-05-28 13:25:09,526 P80006 INFO Loading test data done.
+2022-05-28 13:25:37,331 P80006 INFO [Metrics] AUC: 0.763179 - logloss: 0.367202
+
+```
diff --git a/ranking/ctr/SAM/SAM_criteo_x1/README.md b/ranking/ctr/SAM/SAM_criteo_x1/README.md
index c4b0ebbe..5fe1fb37 100644
--- a/ranking/ctr/SAM/SAM_criteo_x1/README.md
+++ b/ranking/ctr/SAM/SAM_criteo_x1/README.md
@@ -1,231 +1,231 @@
-## SAM_criteo_x1
-
-A hands-on guide to run the SAM model on the Criteo_x1 dataset.
-
-Author: [XUEPAI](https://github.com/xue-pai)
-
-### Index
-[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
-
-### Environments
-+ Hardware
-
- ```python
- CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
- GPU: Tesla V100 32G
- RAM: 755G
-
- ```
-
-+ Software
-
- ```python
- CUDA: 10.2
- python: 3.6.4
- pytorch: 1.0.0
- pandas: 0.22.0
- numpy: 1.19.2
- scipy: 1.5.4
- sklearn: 0.22.1
- pyyaml: 5.4.1
- h5py: 2.8.0
- tqdm: 4.60.0
- fuxictr: 1.2.1
-
- ```
-
-### Dataset
-Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo#Criteo_x1). Please refer to the dataset details to get data ready.
-
-### Code
-
-We use [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [SAM](https://github.com/xue-pai/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/SAM.py).
-
-Running steps:
-
-1. Download [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
-
- ```python
- sys.path.append('YOUR_PATH_TO_FuxiCTR/')
- ```
-
-2. Create a data directory and put the downloaded csv files in `../data/Criteo/Criteo_x1`.
-
-3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [SAM_criteo_x1_tuner_config_03](./SAM_criteo_x1_tuner_config_03). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
-
-4. Run the following script to start.
-
- ```bash
- cd SAM_criteo_x1
- nohup python run_expid.py --config ./SAM_criteo_x1_tuner_config_03 --expid SAM_criteo_x1_012_55e25f89 --gpu 0 > run.log &
- tail -f run.log
- ```
-
-### Results
-
-| AUC | logloss |
-|:--------------------:|:--------------------:|
-| 0.813098 | 0.438855 |
-
-
-### Logs
-```python
-2022-05-31 10:38:41,600 P69477 INFO {
- "aggregation": "weighted_pooling",
- "batch_size": "4096",
- "data_format": "csv",
- "data_root": "../data/Criteo/",
- "dataset_id": "criteo_x1_7b681156",
- "debug": "False",
- "embedding_dim": "10",
- "embedding_regularizer": "1e-05",
- "epochs": "100",
- "every_x_epochs": "1",
- "feature_cols": "[{'active': True, 'dtype': 'float', 'name': ['I1', 'I2', 'I3', 'I4', 'I5', 'I6', 'I7', 'I8', 'I9', 'I10', 'I11', 'I12', 'I13'], 'type': 'numeric'}, {'active': True, 'dtype': 'float', 'name': ['C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10', 'C11', 'C12', 'C13', 'C14', 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21', 'C22', 'C23', 'C24', 'C25', 'C26'], 'type': 'categorical'}]",
- "gpu": "3",
- "interaction_type": "SAM3A",
- "label_col": "{'dtype': 'float', 'name': 'label'}",
- "learning_rate": "0.001",
- "loss": "binary_crossentropy",
- "metrics": "['AUC', 'logloss']",
- "min_categr_count": "1",
- "model": "SAM",
- "model_id": "SAM_criteo_x1_012_55e25f89",
- "model_root": "./Criteo/SAM_criteo_x1/",
- "monitor": "AUC",
- "monitor_mode": "max",
- "net_dropout": "0",
- "net_regularizer": "0",
- "num_interaction_layers": "5",
- "num_workers": "3",
- "optimizer": "adam",
- "patience": "2",
- "pickle_feature_encoder": "True",
- "save_best_only": "True",
- "seed": "2021",
- "shuffle": "True",
- "task": "binary_classification",
- "test_data": "../data/Criteo/Criteo_x1/test.csv",
- "train_data": "../data/Criteo/Criteo_x1/train.csv",
- "use_hdf5": "True",
- "use_residual": "True",
- "valid_data": "../data/Criteo/Criteo_x1/valid.csv",
- "verbose": "0",
- "version": "pytorch"
-}
-2022-05-31 10:38:41,601 P69477 INFO Set up feature encoder...
-2022-05-31 10:38:41,601 P69477 INFO Load feature_map from json: ../data/Criteo/criteo_x1_7b681156/feature_map.json
-2022-05-31 10:38:41,601 P69477 INFO Loading data...
-2022-05-31 10:38:41,603 P69477 INFO Loading data from h5: ../data/Criteo/criteo_x1_7b681156/train.h5
-2022-05-31 10:38:46,548 P69477 INFO Loading data from h5: ../data/Criteo/criteo_x1_7b681156/valid.h5
-2022-05-31 10:38:47,776 P69477 INFO Train samples: total/33003326, pos/8456369, neg/24546957, ratio/25.62%, blocks/1
-2022-05-31 10:38:47,776 P69477 INFO Validation samples: total/8250124, pos/2114300, neg/6135824, ratio/25.63%, blocks/1
-2022-05-31 10:38:47,776 P69477 INFO Loading train data done.
-2022-05-31 10:38:53,726 P69477 INFO Total number of parameters: 20940260.
-2022-05-31 10:38:53,726 P69477 INFO Start training: 8058 batches/epoch
-2022-05-31 10:38:53,726 P69477 INFO ************ Epoch=1 start ************
-2022-05-31 11:00:13,564 P69477 INFO [Metrics] AUC: 0.801662 - logloss: 0.449289
-2022-05-31 11:00:13,565 P69477 INFO Save best model: monitor(max): 0.801662
-2022-05-31 11:00:13,852 P69477 INFO --- 8058/8058 batches finished ---
-2022-05-31 11:00:13,900 P69477 INFO Train loss: 0.485520
-2022-05-31 11:00:13,900 P69477 INFO ************ Epoch=1 end ************
-2022-05-31 11:21:30,684 P69477 INFO [Metrics] AUC: 0.805122 - logloss: 0.446276
-2022-05-31 11:21:30,686 P69477 INFO Save best model: monitor(max): 0.805122
-2022-05-31 11:21:30,787 P69477 INFO --- 8058/8058 batches finished ---
-2022-05-31 11:21:30,824 P69477 INFO Train loss: 0.457815
-2022-05-31 11:21:30,825 P69477 INFO ************ Epoch=2 end ************
-2022-05-31 11:42:45,886 P69477 INFO [Metrics] AUC: 0.806512 - logloss: 0.444994
-2022-05-31 11:42:45,887 P69477 INFO Save best model: monitor(max): 0.806512
-2022-05-31 11:42:45,987 P69477 INFO --- 8058/8058 batches finished ---
-2022-05-31 11:42:46,025 P69477 INFO Train loss: 0.455675
-2022-05-31 11:42:46,026 P69477 INFO ************ Epoch=3 end ************
-2022-05-31 12:04:03,454 P69477 INFO [Metrics] AUC: 0.807335 - logloss: 0.444205
-2022-05-31 12:04:03,455 P69477 INFO Save best model: monitor(max): 0.807335
-2022-05-31 12:04:03,558 P69477 INFO --- 8058/8058 batches finished ---
-2022-05-31 12:04:03,598 P69477 INFO Train loss: 0.454515
-2022-05-31 12:04:03,598 P69477 INFO ************ Epoch=4 end ************
-2022-05-31 12:25:20,165 P69477 INFO [Metrics] AUC: 0.807930 - logloss: 0.443640
-2022-05-31 12:25:20,166 P69477 INFO Save best model: monitor(max): 0.807930
-2022-05-31 12:25:20,256 P69477 INFO --- 8058/8058 batches finished ---
-2022-05-31 12:25:20,297 P69477 INFO Train loss: 0.453823
-2022-05-31 12:25:20,297 P69477 INFO ************ Epoch=5 end ************
-2022-05-31 12:46:33,776 P69477 INFO [Metrics] AUC: 0.808102 - logloss: 0.443469
-2022-05-31 12:46:33,778 P69477 INFO Save best model: monitor(max): 0.808102
-2022-05-31 12:46:33,869 P69477 INFO --- 8058/8058 batches finished ---
-2022-05-31 12:46:33,911 P69477 INFO Train loss: 0.453343
-2022-05-31 12:46:33,911 P69477 INFO ************ Epoch=6 end ************
-2022-05-31 13:07:43,227 P69477 INFO [Metrics] AUC: 0.808464 - logloss: 0.443094
-2022-05-31 13:07:43,228 P69477 INFO Save best model: monitor(max): 0.808464
-2022-05-31 13:07:43,325 P69477 INFO --- 8058/8058 batches finished ---
-2022-05-31 13:07:43,363 P69477 INFO Train loss: 0.452970
-2022-05-31 13:07:43,364 P69477 INFO ************ Epoch=7 end ************
-2022-05-31 13:28:50,201 P69477 INFO [Metrics] AUC: 0.808635 - logloss: 0.442933
-2022-05-31 13:28:50,203 P69477 INFO Save best model: monitor(max): 0.808635
-2022-05-31 13:28:50,307 P69477 INFO --- 8058/8058 batches finished ---
-2022-05-31 13:28:50,349 P69477 INFO Train loss: 0.452673
-2022-05-31 13:28:50,349 P69477 INFO ************ Epoch=8 end ************
-2022-05-31 13:49:57,226 P69477 INFO [Metrics] AUC: 0.808637 - logloss: 0.442973
-2022-05-31 13:49:57,227 P69477 INFO Save best model: monitor(max): 0.808637
-2022-05-31 13:49:57,342 P69477 INFO --- 8058/8058 batches finished ---
-2022-05-31 13:49:57,379 P69477 INFO Train loss: 0.452444
-2022-05-31 13:49:57,379 P69477 INFO ************ Epoch=9 end ************
-2022-05-31 14:11:03,305 P69477 INFO [Metrics] AUC: 0.808924 - logloss: 0.442759
-2022-05-31 14:11:03,306 P69477 INFO Save best model: monitor(max): 0.808924
-2022-05-31 14:11:03,396 P69477 INFO --- 8058/8058 batches finished ---
-2022-05-31 14:11:03,435 P69477 INFO Train loss: 0.452228
-2022-05-31 14:11:03,435 P69477 INFO ************ Epoch=10 end ************
-2022-05-31 14:32:07,419 P69477 INFO [Metrics] AUC: 0.808976 - logloss: 0.442699
-2022-05-31 14:32:07,421 P69477 INFO Save best model: monitor(max): 0.808976
-2022-05-31 14:32:07,510 P69477 INFO --- 8058/8058 batches finished ---
-2022-05-31 14:32:07,548 P69477 INFO Train loss: 0.452068
-2022-05-31 14:32:07,548 P69477 INFO ************ Epoch=11 end ************
-2022-05-31 14:53:11,037 P69477 INFO [Metrics] AUC: 0.809002 - logloss: 0.442607
-2022-05-31 14:53:11,039 P69477 INFO Save best model: monitor(max): 0.809002
-2022-05-31 14:53:11,136 P69477 INFO --- 8058/8058 batches finished ---
-2022-05-31 14:53:11,175 P69477 INFO Train loss: 0.451914
-2022-05-31 14:53:11,175 P69477 INFO ************ Epoch=12 end ************
-2022-05-31 15:14:14,645 P69477 INFO [Metrics] AUC: 0.808964 - logloss: 0.442691
-2022-05-31 15:14:14,647 P69477 INFO Monitor(max) STOP: 0.808964 !
-2022-05-31 15:14:14,647 P69477 INFO Reduce learning rate on plateau: 0.000100
-2022-05-31 15:14:14,647 P69477 INFO --- 8058/8058 batches finished ---
-2022-05-31 15:14:14,689 P69477 INFO Train loss: 0.451778
-2022-05-31 15:14:14,690 P69477 INFO ************ Epoch=13 end ************
-2022-05-31 15:35:17,365 P69477 INFO [Metrics] AUC: 0.812336 - logloss: 0.439588
-2022-05-31 15:35:17,366 P69477 INFO Save best model: monitor(max): 0.812336
-2022-05-31 15:35:17,456 P69477 INFO --- 8058/8058 batches finished ---
-2022-05-31 15:35:17,494 P69477 INFO Train loss: 0.441169
-2022-05-31 15:35:17,494 P69477 INFO ************ Epoch=14 end ************
-2022-05-31 15:56:20,198 P69477 INFO [Metrics] AUC: 0.812733 - logloss: 0.439322
-2022-05-31 15:56:20,199 P69477 INFO Save best model: monitor(max): 0.812733
-2022-05-31 15:56:20,287 P69477 INFO --- 8058/8058 batches finished ---
-2022-05-31 15:56:20,327 P69477 INFO Train loss: 0.437275
-2022-05-31 15:56:20,327 P69477 INFO ************ Epoch=15 end ************
-2022-05-31 16:17:22,768 P69477 INFO [Metrics] AUC: 0.812786 - logloss: 0.439343
-2022-05-31 16:17:22,769 P69477 INFO Save best model: monitor(max): 0.812786
-2022-05-31 16:17:22,856 P69477 INFO --- 8058/8058 batches finished ---
-2022-05-31 16:17:22,897 P69477 INFO Train loss: 0.435485
-2022-05-31 16:17:22,897 P69477 INFO ************ Epoch=16 end ************
-2022-05-31 16:38:24,290 P69477 INFO [Metrics] AUC: 0.812564 - logloss: 0.439558
-2022-05-31 16:38:24,291 P69477 INFO Monitor(max) STOP: 0.812564 !
-2022-05-31 16:38:24,291 P69477 INFO Reduce learning rate on plateau: 0.000010
-2022-05-31 16:38:24,291 P69477 INFO --- 8058/8058 batches finished ---
-2022-05-31 16:38:24,329 P69477 INFO Train loss: 0.434076
-2022-05-31 16:38:24,329 P69477 INFO ************ Epoch=17 end ************
-2022-05-31 16:59:25,511 P69477 INFO [Metrics] AUC: 0.811779 - logloss: 0.440918
-2022-05-31 16:59:25,512 P69477 INFO Monitor(max) STOP: 0.811779 !
-2022-05-31 16:59:25,512 P69477 INFO Reduce learning rate on plateau: 0.000001
-2022-05-31 16:59:25,512 P69477 INFO Early stopping at epoch=18
-2022-05-31 16:59:25,512 P69477 INFO --- 8058/8058 batches finished ---
-2022-05-31 16:59:25,555 P69477 INFO Train loss: 0.429269
-2022-05-31 16:59:25,555 P69477 INFO Training finished.
-2022-05-31 16:59:25,555 P69477 INFO Load best model: /cache/FuxiCTR/benchmarks/Criteo/SAM_criteo_x1/criteo_x1_7b681156/SAM_criteo_x1_012_55e25f89.model
-2022-05-31 16:59:29,433 P69477 INFO ****** Validation evaluation ******
-2022-05-31 17:00:07,391 P69477 INFO [Metrics] AUC: 0.812786 - logloss: 0.439343
-2022-05-31 17:00:07,467 P69477 INFO ******** Test evaluation ********
-2022-05-31 17:00:07,468 P69477 INFO Loading data...
-2022-05-31 17:00:07,468 P69477 INFO Loading data from h5: ../data/Criteo/criteo_x1_7b681156/test.h5
-2022-05-31 17:00:08,300 P69477 INFO Test samples: total/4587167, pos/1174769, neg/3412398, ratio/25.61%, blocks/1
-2022-05-31 17:00:08,301 P69477 INFO Loading test data done.
-2022-05-31 17:00:30,908 P69477 INFO [Metrics] AUC: 0.813098 - logloss: 0.438855
-
-```
+## SAM_criteo_x1
+
+A hands-on guide to run the SAM model on the Criteo_x1 dataset.
+
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
+
+### Index
+[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
+
+### Environments
++ Hardware
+
+ ```python
+ CPU: Intel(R) Xeon(R) Gold 6278C CPU @ 2.60GHz
+ GPU: Tesla V100 32G
+ RAM: 755G
+
+ ```
+
++ Software
+
+ ```python
+ CUDA: 10.2
+ python: 3.6.4
+ pytorch: 1.0.0
+ pandas: 0.22.0
+ numpy: 1.19.2
+ scipy: 1.5.4
+ sklearn: 0.22.1
+ pyyaml: 5.4.1
+ h5py: 2.8.0
+ tqdm: 4.60.0
+ fuxictr: 1.2.1
+
+ ```
+
+### Dataset
+Dataset ID: [Criteo_x1](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x1). Please refer to the dataset details to get data ready.
+
+### Code
+
+We use [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [SAM](https://github.com/reczoo/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/SAM.py).
+
+Running steps:
+
+1. Download [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+
+ ```python
+ sys.path.append('YOUR_PATH_TO_FuxiCTR/')
+ ```
+
+2. Create a data directory and put the downloaded csv files in `../data/Criteo/Criteo_x1`.
+
+3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [SAM_criteo_x1_tuner_config_03](./SAM_criteo_x1_tuner_config_03). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
+
+4. Run the following script to start.
+
+ ```bash
+ cd SAM_criteo_x1
+ nohup python run_expid.py --config ./SAM_criteo_x1_tuner_config_03 --expid SAM_criteo_x1_012_55e25f89 --gpu 0 > run.log &
+ tail -f run.log
+ ```
+
+### Results
+
+| AUC | logloss |
+|:--------------------:|:--------------------:|
+| 0.813098 | 0.438855 |
+
+
+### Logs
+```python
+2022-05-31 10:38:41,600 P69477 INFO {
+ "aggregation": "weighted_pooling",
+ "batch_size": "4096",
+ "data_format": "csv",
+ "data_root": "../data/Criteo/",
+ "dataset_id": "criteo_x1_7b681156",
+ "debug": "False",
+ "embedding_dim": "10",
+ "embedding_regularizer": "1e-05",
+ "epochs": "100",
+ "every_x_epochs": "1",
+ "feature_cols": "[{'active': True, 'dtype': 'float', 'name': ['I1', 'I2', 'I3', 'I4', 'I5', 'I6', 'I7', 'I8', 'I9', 'I10', 'I11', 'I12', 'I13'], 'type': 'numeric'}, {'active': True, 'dtype': 'float', 'name': ['C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9', 'C10', 'C11', 'C12', 'C13', 'C14', 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21', 'C22', 'C23', 'C24', 'C25', 'C26'], 'type': 'categorical'}]",
+ "gpu": "3",
+ "interaction_type": "SAM3A",
+ "label_col": "{'dtype': 'float', 'name': 'label'}",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "metrics": "['AUC', 'logloss']",
+ "min_categr_count": "1",
+ "model": "SAM",
+ "model_id": "SAM_criteo_x1_012_55e25f89",
+ "model_root": "./Criteo/SAM_criteo_x1/",
+ "monitor": "AUC",
+ "monitor_mode": "max",
+ "net_dropout": "0",
+ "net_regularizer": "0",
+ "num_interaction_layers": "5",
+ "num_workers": "3",
+ "optimizer": "adam",
+ "patience": "2",
+ "pickle_feature_encoder": "True",
+ "save_best_only": "True",
+ "seed": "2021",
+ "shuffle": "True",
+ "task": "binary_classification",
+ "test_data": "../data/Criteo/Criteo_x1/test.csv",
+ "train_data": "../data/Criteo/Criteo_x1/train.csv",
+ "use_hdf5": "True",
+ "use_residual": "True",
+ "valid_data": "../data/Criteo/Criteo_x1/valid.csv",
+ "verbose": "0",
+ "version": "pytorch"
+}
+2022-05-31 10:38:41,601 P69477 INFO Set up feature encoder...
+2022-05-31 10:38:41,601 P69477 INFO Load feature_map from json: ../data/Criteo/criteo_x1_7b681156/feature_map.json
+2022-05-31 10:38:41,601 P69477 INFO Loading data...
+2022-05-31 10:38:41,603 P69477 INFO Loading data from h5: ../data/Criteo/criteo_x1_7b681156/train.h5
+2022-05-31 10:38:46,548 P69477 INFO Loading data from h5: ../data/Criteo/criteo_x1_7b681156/valid.h5
+2022-05-31 10:38:47,776 P69477 INFO Train samples: total/33003326, pos/8456369, neg/24546957, ratio/25.62%, blocks/1
+2022-05-31 10:38:47,776 P69477 INFO Validation samples: total/8250124, pos/2114300, neg/6135824, ratio/25.63%, blocks/1
+2022-05-31 10:38:47,776 P69477 INFO Loading train data done.
+2022-05-31 10:38:53,726 P69477 INFO Total number of parameters: 20940260.
+2022-05-31 10:38:53,726 P69477 INFO Start training: 8058 batches/epoch
+2022-05-31 10:38:53,726 P69477 INFO ************ Epoch=1 start ************
+2022-05-31 11:00:13,564 P69477 INFO [Metrics] AUC: 0.801662 - logloss: 0.449289
+2022-05-31 11:00:13,565 P69477 INFO Save best model: monitor(max): 0.801662
+2022-05-31 11:00:13,852 P69477 INFO --- 8058/8058 batches finished ---
+2022-05-31 11:00:13,900 P69477 INFO Train loss: 0.485520
+2022-05-31 11:00:13,900 P69477 INFO ************ Epoch=1 end ************
+2022-05-31 11:21:30,684 P69477 INFO [Metrics] AUC: 0.805122 - logloss: 0.446276
+2022-05-31 11:21:30,686 P69477 INFO Save best model: monitor(max): 0.805122
+2022-05-31 11:21:30,787 P69477 INFO --- 8058/8058 batches finished ---
+2022-05-31 11:21:30,824 P69477 INFO Train loss: 0.457815
+2022-05-31 11:21:30,825 P69477 INFO ************ Epoch=2 end ************
+2022-05-31 11:42:45,886 P69477 INFO [Metrics] AUC: 0.806512 - logloss: 0.444994
+2022-05-31 11:42:45,887 P69477 INFO Save best model: monitor(max): 0.806512
+2022-05-31 11:42:45,987 P69477 INFO --- 8058/8058 batches finished ---
+2022-05-31 11:42:46,025 P69477 INFO Train loss: 0.455675
+2022-05-31 11:42:46,026 P69477 INFO ************ Epoch=3 end ************
+2022-05-31 12:04:03,454 P69477 INFO [Metrics] AUC: 0.807335 - logloss: 0.444205
+2022-05-31 12:04:03,455 P69477 INFO Save best model: monitor(max): 0.807335
+2022-05-31 12:04:03,558 P69477 INFO --- 8058/8058 batches finished ---
+2022-05-31 12:04:03,598 P69477 INFO Train loss: 0.454515
+2022-05-31 12:04:03,598 P69477 INFO ************ Epoch=4 end ************
+2022-05-31 12:25:20,165 P69477 INFO [Metrics] AUC: 0.807930 - logloss: 0.443640
+2022-05-31 12:25:20,166 P69477 INFO Save best model: monitor(max): 0.807930
+2022-05-31 12:25:20,256 P69477 INFO --- 8058/8058 batches finished ---
+2022-05-31 12:25:20,297 P69477 INFO Train loss: 0.453823
+2022-05-31 12:25:20,297 P69477 INFO ************ Epoch=5 end ************
+2022-05-31 12:46:33,776 P69477 INFO [Metrics] AUC: 0.808102 - logloss: 0.443469
+2022-05-31 12:46:33,778 P69477 INFO Save best model: monitor(max): 0.808102
+2022-05-31 12:46:33,869 P69477 INFO --- 8058/8058 batches finished ---
+2022-05-31 12:46:33,911 P69477 INFO Train loss: 0.453343
+2022-05-31 12:46:33,911 P69477 INFO ************ Epoch=6 end ************
+2022-05-31 13:07:43,227 P69477 INFO [Metrics] AUC: 0.808464 - logloss: 0.443094
+2022-05-31 13:07:43,228 P69477 INFO Save best model: monitor(max): 0.808464
+2022-05-31 13:07:43,325 P69477 INFO --- 8058/8058 batches finished ---
+2022-05-31 13:07:43,363 P69477 INFO Train loss: 0.452970
+2022-05-31 13:07:43,364 P69477 INFO ************ Epoch=7 end ************
+2022-05-31 13:28:50,201 P69477 INFO [Metrics] AUC: 0.808635 - logloss: 0.442933
+2022-05-31 13:28:50,203 P69477 INFO Save best model: monitor(max): 0.808635
+2022-05-31 13:28:50,307 P69477 INFO --- 8058/8058 batches finished ---
+2022-05-31 13:28:50,349 P69477 INFO Train loss: 0.452673
+2022-05-31 13:28:50,349 P69477 INFO ************ Epoch=8 end ************
+2022-05-31 13:49:57,226 P69477 INFO [Metrics] AUC: 0.808637 - logloss: 0.442973
+2022-05-31 13:49:57,227 P69477 INFO Save best model: monitor(max): 0.808637
+2022-05-31 13:49:57,342 P69477 INFO --- 8058/8058 batches finished ---
+2022-05-31 13:49:57,379 P69477 INFO Train loss: 0.452444
+2022-05-31 13:49:57,379 P69477 INFO ************ Epoch=9 end ************
+2022-05-31 14:11:03,305 P69477 INFO [Metrics] AUC: 0.808924 - logloss: 0.442759
+2022-05-31 14:11:03,306 P69477 INFO Save best model: monitor(max): 0.808924
+2022-05-31 14:11:03,396 P69477 INFO --- 8058/8058 batches finished ---
+2022-05-31 14:11:03,435 P69477 INFO Train loss: 0.452228
+2022-05-31 14:11:03,435 P69477 INFO ************ Epoch=10 end ************
+2022-05-31 14:32:07,419 P69477 INFO [Metrics] AUC: 0.808976 - logloss: 0.442699
+2022-05-31 14:32:07,421 P69477 INFO Save best model: monitor(max): 0.808976
+2022-05-31 14:32:07,510 P69477 INFO --- 8058/8058 batches finished ---
+2022-05-31 14:32:07,548 P69477 INFO Train loss: 0.452068
+2022-05-31 14:32:07,548 P69477 INFO ************ Epoch=11 end ************
+2022-05-31 14:53:11,037 P69477 INFO [Metrics] AUC: 0.809002 - logloss: 0.442607
+2022-05-31 14:53:11,039 P69477 INFO Save best model: monitor(max): 0.809002
+2022-05-31 14:53:11,136 P69477 INFO --- 8058/8058 batches finished ---
+2022-05-31 14:53:11,175 P69477 INFO Train loss: 0.451914
+2022-05-31 14:53:11,175 P69477 INFO ************ Epoch=12 end ************
+2022-05-31 15:14:14,645 P69477 INFO [Metrics] AUC: 0.808964 - logloss: 0.442691
+2022-05-31 15:14:14,647 P69477 INFO Monitor(max) STOP: 0.808964 !
+2022-05-31 15:14:14,647 P69477 INFO Reduce learning rate on plateau: 0.000100
+2022-05-31 15:14:14,647 P69477 INFO --- 8058/8058 batches finished ---
+2022-05-31 15:14:14,689 P69477 INFO Train loss: 0.451778
+2022-05-31 15:14:14,690 P69477 INFO ************ Epoch=13 end ************
+2022-05-31 15:35:17,365 P69477 INFO [Metrics] AUC: 0.812336 - logloss: 0.439588
+2022-05-31 15:35:17,366 P69477 INFO Save best model: monitor(max): 0.812336
+2022-05-31 15:35:17,456 P69477 INFO --- 8058/8058 batches finished ---
+2022-05-31 15:35:17,494 P69477 INFO Train loss: 0.441169
+2022-05-31 15:35:17,494 P69477 INFO ************ Epoch=14 end ************
+2022-05-31 15:56:20,198 P69477 INFO [Metrics] AUC: 0.812733 - logloss: 0.439322
+2022-05-31 15:56:20,199 P69477 INFO Save best model: monitor(max): 0.812733
+2022-05-31 15:56:20,287 P69477 INFO --- 8058/8058 batches finished ---
+2022-05-31 15:56:20,327 P69477 INFO Train loss: 0.437275
+2022-05-31 15:56:20,327 P69477 INFO ************ Epoch=15 end ************
+2022-05-31 16:17:22,768 P69477 INFO [Metrics] AUC: 0.812786 - logloss: 0.439343
+2022-05-31 16:17:22,769 P69477 INFO Save best model: monitor(max): 0.812786
+2022-05-31 16:17:22,856 P69477 INFO --- 8058/8058 batches finished ---
+2022-05-31 16:17:22,897 P69477 INFO Train loss: 0.435485
+2022-05-31 16:17:22,897 P69477 INFO ************ Epoch=16 end ************
+2022-05-31 16:38:24,290 P69477 INFO [Metrics] AUC: 0.812564 - logloss: 0.439558
+2022-05-31 16:38:24,291 P69477 INFO Monitor(max) STOP: 0.812564 !
+2022-05-31 16:38:24,291 P69477 INFO Reduce learning rate on plateau: 0.000010
+2022-05-31 16:38:24,291 P69477 INFO --- 8058/8058 batches finished ---
+2022-05-31 16:38:24,329 P69477 INFO Train loss: 0.434076
+2022-05-31 16:38:24,329 P69477 INFO ************ Epoch=17 end ************
+2022-05-31 16:59:25,511 P69477 INFO [Metrics] AUC: 0.811779 - logloss: 0.440918
+2022-05-31 16:59:25,512 P69477 INFO Monitor(max) STOP: 0.811779 !
+2022-05-31 16:59:25,512 P69477 INFO Reduce learning rate on plateau: 0.000001
+2022-05-31 16:59:25,512 P69477 INFO Early stopping at epoch=18
+2022-05-31 16:59:25,512 P69477 INFO --- 8058/8058 batches finished ---
+2022-05-31 16:59:25,555 P69477 INFO Train loss: 0.429269
+2022-05-31 16:59:25,555 P69477 INFO Training finished.
+2022-05-31 16:59:25,555 P69477 INFO Load best model: /cache/FuxiCTR/benchmarks/Criteo/SAM_criteo_x1/criteo_x1_7b681156/SAM_criteo_x1_012_55e25f89.model
+2022-05-31 16:59:29,433 P69477 INFO ****** Validation evaluation ******
+2022-05-31 17:00:07,391 P69477 INFO [Metrics] AUC: 0.812786 - logloss: 0.439343
+2022-05-31 17:00:07,467 P69477 INFO ******** Test evaluation ********
+2022-05-31 17:00:07,468 P69477 INFO Loading data...
+2022-05-31 17:00:07,468 P69477 INFO Loading data from h5: ../data/Criteo/criteo_x1_7b681156/test.h5
+2022-05-31 17:00:08,300 P69477 INFO Test samples: total/4587167, pos/1174769, neg/3412398, ratio/25.61%, blocks/1
+2022-05-31 17:00:08,301 P69477 INFO Loading test data done.
+2022-05-31 17:00:30,908 P69477 INFO [Metrics] AUC: 0.813098 - logloss: 0.438855
+
+```
diff --git a/ranking/ctr/SAM/SAM_frappe_x1/README.md b/ranking/ctr/SAM/SAM_frappe_x1/README.md
index 837c6f7b..036a0cb9 100644
--- a/ranking/ctr/SAM/SAM_frappe_x1/README.md
+++ b/ranking/ctr/SAM/SAM_frappe_x1/README.md
@@ -1,370 +1,370 @@
-## SAM_frappe_x1
-
-A hands-on guide to run the SAM model on the Frappe_x1 dataset.
-
-Author: [XUEPAI](https://github.com/xue-pai)
-
-### Index
-[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
-
-### Environments
-+ Hardware
-
- ```python
- CPU: Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.6GHz
- GPU: Tesla P100 16G
- RAM: 755G
-
- ```
-
-+ Software
-
- ```python
- CUDA: 11.4
- python: 3.6.5
- pytorch: 1.0.1.post2
- pandas: 0.23.0
- numpy: 1.18.1
- scipy: 1.1.0
- sklearn: 0.23.1
- pyyaml: 5.1
- h5py: 2.7.1
- tqdm: 4.59.0
- fuxictr: 1.2.1
- ```
-
-### Dataset
-Dataset ID: [Frappe_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Frappe#Frappe_x1). Please refer to the dataset details to get data ready.
-
-### Code
-
-We use [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [SAM](https://github.com/xue-pai/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/SAM.py).
-
-Running steps:
-
-1. Download [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
-
- ```python
- sys.path.append('YOUR_PATH_TO_FuxiCTR/')
- ```
-
-2. Create a data directory and put the downloaded csv files in `../data/Frappe/Frappe_x1`.
-
-3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [SAM_frappe_x1_tuner_config_02](./SAM_frappe_x1_tuner_config_02). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
-
-4. Run the following script to start.
-
- ```bash
- cd SAM_frappe_x1
- nohup python run_expid.py --config ./SAM_frappe_x1_tuner_config_02 --expid SAM_frappe_x1_005_1368acce --gpu 0 > run.log &
- tail -f run.log
- ```
-
-### Results
-
-| AUC | logloss |
-|:--------------------:|:--------------------:|
-| 0.980120 | 0.173521 |
-
-
-### Logs
-```python
-2022-04-12 16:14:27,112 P18285 INFO {
- "aggregation": "concat",
- "batch_size": "4096",
- "data_format": "csv",
- "data_root": "../data/Frappe/",
- "dataset_id": "frappe_x1_04e961e9",
- "debug": "False",
- "embedding_dim": "10",
- "embedding_regularizer": "0.05",
- "epochs": "100",
- "every_x_epochs": "1",
- "feature_cols": "[{'active': True, 'dtype': 'float', 'name': ['user', 'item', 'daytime', 'weekday', 'isweekend', 'homework', 'cost', 'weather', 'country', 'city'], 'type': 'categorical'}]",
- "gpu": "0",
- "interaction_type": "SAM3A",
- "label_col": "{'dtype': 'float', 'name': 'label'}",
- "learning_rate": "0.001",
- "loss": "binary_crossentropy",
- "metrics": "['AUC', 'logloss']",
- "min_categr_count": "1",
- "model": "SAM",
- "model_id": "SAM_frappe_x1_005_1368acce",
- "model_root": "./Frappe/SAM_frappe_x1/",
- "monitor": "AUC",
- "monitor_mode": "max",
- "net_dropout": "0.2",
- "net_regularizer": "0",
- "num_interaction_layers": "4",
- "num_workers": "3",
- "optimizer": "adam",
- "patience": "2",
- "pickle_feature_encoder": "True",
- "save_best_only": "True",
- "seed": "2021",
- "shuffle": "True",
- "task": "binary_classification",
- "test_data": "../data/Frappe/Frappe_x1/test.csv",
- "train_data": "../data/Frappe/Frappe_x1/train.csv",
- "use_hdf5": "True",
- "use_residual": "True",
- "valid_data": "../data/Frappe/Frappe_x1/valid.csv",
- "verbose": "1",
- "version": "pytorch"
-}
-2022-04-12 16:14:27,112 P18285 INFO Set up feature encoder...
-2022-04-12 16:14:27,112 P18285 INFO Load feature_map from json: ../data/Frappe/frappe_x1_04e961e9/feature_map.json
-2022-04-12 16:14:27,113 P18285 INFO Loading data...
-2022-04-12 16:14:27,115 P18285 INFO Loading data from h5: ../data/Frappe/frappe_x1_04e961e9/train.h5
-2022-04-12 16:14:27,126 P18285 INFO Loading data from h5: ../data/Frappe/frappe_x1_04e961e9/valid.h5
-2022-04-12 16:14:27,130 P18285 INFO Train samples: total/202027, pos/67604, neg/134423, ratio/33.46%, blocks/1
-2022-04-12 16:14:27,130 P18285 INFO Validation samples: total/57722, pos/19063, neg/38659, ratio/33.03%, blocks/1
-2022-04-12 16:14:27,130 P18285 INFO Loading train data done.
-2022-04-12 16:14:31,290 P18285 INFO Total number of parameters: 58791.
-2022-04-12 16:14:31,291 P18285 INFO Start training: 50 batches/epoch
-2022-04-12 16:14:31,291 P18285 INFO ************ Epoch=1 start ************
-2022-04-12 16:14:37,006 P18285 INFO [Metrics] AUC: 0.916969 - logloss: 0.426452
-2022-04-12 16:14:37,007 P18285 INFO Save best model: monitor(max): 0.916969
-2022-04-12 16:14:37,010 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:14:37,072 P18285 INFO Train loss: 0.624258
-2022-04-12 16:14:37,073 P18285 INFO ************ Epoch=1 end ************
-2022-04-12 16:14:42,746 P18285 INFO [Metrics] AUC: 0.935634 - logloss: 0.293012
-2022-04-12 16:14:42,747 P18285 INFO Save best model: monitor(max): 0.935634
-2022-04-12 16:14:42,749 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:14:42,791 P18285 INFO Train loss: 0.436476
-2022-04-12 16:14:42,791 P18285 INFO ************ Epoch=2 end ************
-2022-04-12 16:14:48,305 P18285 INFO [Metrics] AUC: 0.935704 - logloss: 0.287920
-2022-04-12 16:14:48,305 P18285 INFO Save best model: monitor(max): 0.935704
-2022-04-12 16:14:48,309 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:14:48,354 P18285 INFO Train loss: 0.368875
-2022-04-12 16:14:48,354 P18285 INFO ************ Epoch=3 end ************
-2022-04-12 16:14:53,431 P18285 INFO [Metrics] AUC: 0.937183 - logloss: 0.286680
-2022-04-12 16:14:53,431 P18285 INFO Save best model: monitor(max): 0.937183
-2022-04-12 16:14:53,435 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:14:53,475 P18285 INFO Train loss: 0.352242
-2022-04-12 16:14:53,475 P18285 INFO ************ Epoch=4 end ************
-2022-04-12 16:14:58,179 P18285 INFO [Metrics] AUC: 0.937875 - logloss: 0.285015
-2022-04-12 16:14:58,180 P18285 INFO Save best model: monitor(max): 0.937875
-2022-04-12 16:14:58,184 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:14:58,224 P18285 INFO Train loss: 0.343246
-2022-04-12 16:14:58,225 P18285 INFO ************ Epoch=5 end ************
-2022-04-12 16:15:03,258 P18285 INFO [Metrics] AUC: 0.938140 - logloss: 0.284312
-2022-04-12 16:15:03,259 P18285 INFO Save best model: monitor(max): 0.938140
-2022-04-12 16:15:03,261 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:15:03,302 P18285 INFO Train loss: 0.338004
-2022-04-12 16:15:03,303 P18285 INFO ************ Epoch=6 end ************
-2022-04-12 16:15:08,347 P18285 INFO [Metrics] AUC: 0.939443 - logloss: 0.282059
-2022-04-12 16:15:08,349 P18285 INFO Save best model: monitor(max): 0.939443
-2022-04-12 16:15:08,352 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:15:08,394 P18285 INFO Train loss: 0.333321
-2022-04-12 16:15:08,394 P18285 INFO ************ Epoch=7 end ************
-2022-04-12 16:15:13,430 P18285 INFO [Metrics] AUC: 0.939695 - logloss: 0.282163
-2022-04-12 16:15:13,431 P18285 INFO Save best model: monitor(max): 0.939695
-2022-04-12 16:15:13,434 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:15:13,478 P18285 INFO Train loss: 0.328826
-2022-04-12 16:15:13,479 P18285 INFO ************ Epoch=8 end ************
-2022-04-12 16:15:18,641 P18285 INFO [Metrics] AUC: 0.941144 - logloss: 0.278907
-2022-04-12 16:15:18,642 P18285 INFO Save best model: monitor(max): 0.941144
-2022-04-12 16:15:18,658 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:15:18,700 P18285 INFO Train loss: 0.324083
-2022-04-12 16:15:18,700 P18285 INFO ************ Epoch=9 end ************
-2022-04-12 16:15:23,852 P18285 INFO [Metrics] AUC: 0.942824 - logloss: 0.275706
-2022-04-12 16:15:23,853 P18285 INFO Save best model: monitor(max): 0.942824
-2022-04-12 16:15:23,856 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:15:23,895 P18285 INFO Train loss: 0.320081
-2022-04-12 16:15:23,896 P18285 INFO ************ Epoch=10 end ************
-2022-04-12 16:15:29,052 P18285 INFO [Metrics] AUC: 0.944781 - logloss: 0.269588
-2022-04-12 16:15:29,053 P18285 INFO Save best model: monitor(max): 0.944781
-2022-04-12 16:15:29,056 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:15:29,097 P18285 INFO Train loss: 0.314938
-2022-04-12 16:15:29,097 P18285 INFO ************ Epoch=11 end ************
-2022-04-12 16:15:34,237 P18285 INFO [Metrics] AUC: 0.948028 - logloss: 0.261683
-2022-04-12 16:15:34,238 P18285 INFO Save best model: monitor(max): 0.948028
-2022-04-12 16:15:34,240 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:15:34,280 P18285 INFO Train loss: 0.308690
-2022-04-12 16:15:34,280 P18285 INFO ************ Epoch=12 end ************
-2022-04-12 16:15:39,589 P18285 INFO [Metrics] AUC: 0.948180 - logloss: 0.260188
-2022-04-12 16:15:39,590 P18285 INFO Save best model: monitor(max): 0.948180
-2022-04-12 16:15:39,593 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:15:39,645 P18285 INFO Train loss: 0.304371
-2022-04-12 16:15:39,645 P18285 INFO ************ Epoch=13 end ************
-2022-04-12 16:15:44,779 P18285 INFO [Metrics] AUC: 0.949529 - logloss: 0.257608
-2022-04-12 16:15:44,780 P18285 INFO Save best model: monitor(max): 0.949529
-2022-04-12 16:15:44,782 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:15:44,824 P18285 INFO Train loss: 0.300745
-2022-04-12 16:15:44,824 P18285 INFO ************ Epoch=14 end ************
-2022-04-12 16:15:50,175 P18285 INFO [Metrics] AUC: 0.951175 - logloss: 0.253271
-2022-04-12 16:15:50,176 P18285 INFO Save best model: monitor(max): 0.951175
-2022-04-12 16:15:50,179 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:15:50,224 P18285 INFO Train loss: 0.297171
-2022-04-12 16:15:50,224 P18285 INFO ************ Epoch=15 end ************
-2022-04-12 16:15:55,570 P18285 INFO [Metrics] AUC: 0.953267 - logloss: 0.249932
-2022-04-12 16:15:55,571 P18285 INFO Save best model: monitor(max): 0.953267
-2022-04-12 16:15:55,574 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:15:55,615 P18285 INFO Train loss: 0.293501
-2022-04-12 16:15:55,615 P18285 INFO ************ Epoch=16 end ************
-2022-04-12 16:16:00,781 P18285 INFO [Metrics] AUC: 0.956531 - logloss: 0.239244
-2022-04-12 16:16:00,782 P18285 INFO Save best model: monitor(max): 0.956531
-2022-04-12 16:16:00,785 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:16:00,821 P18285 INFO Train loss: 0.289278
-2022-04-12 16:16:00,821 P18285 INFO ************ Epoch=17 end ************
-2022-04-12 16:16:06,129 P18285 INFO [Metrics] AUC: 0.959463 - logloss: 0.231491
-2022-04-12 16:16:06,129 P18285 INFO Save best model: monitor(max): 0.959463
-2022-04-12 16:16:06,132 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:16:06,169 P18285 INFO Train loss: 0.284526
-2022-04-12 16:16:06,169 P18285 INFO ************ Epoch=18 end ************
-2022-04-12 16:16:11,534 P18285 INFO [Metrics] AUC: 0.959701 - logloss: 0.230186
-2022-04-12 16:16:11,535 P18285 INFO Save best model: monitor(max): 0.959701
-2022-04-12 16:16:11,539 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:16:11,578 P18285 INFO Train loss: 0.280906
-2022-04-12 16:16:11,578 P18285 INFO ************ Epoch=19 end ************
-2022-04-12 16:16:16,912 P18285 INFO [Metrics] AUC: 0.963088 - logloss: 0.221950
-2022-04-12 16:16:16,912 P18285 INFO Save best model: monitor(max): 0.963088
-2022-04-12 16:16:16,914 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:16:16,948 P18285 INFO Train loss: 0.277038
-2022-04-12 16:16:16,948 P18285 INFO ************ Epoch=20 end ************
-2022-04-12 16:16:22,506 P18285 INFO [Metrics] AUC: 0.963855 - logloss: 0.219043
-2022-04-12 16:16:22,506 P18285 INFO Save best model: monitor(max): 0.963855
-2022-04-12 16:16:22,509 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:16:22,558 P18285 INFO Train loss: 0.272575
-2022-04-12 16:16:22,558 P18285 INFO ************ Epoch=21 end ************
-2022-04-12 16:16:27,927 P18285 INFO [Metrics] AUC: 0.965482 - logloss: 0.214416
-2022-04-12 16:16:27,927 P18285 INFO Save best model: monitor(max): 0.965482
-2022-04-12 16:16:27,931 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:16:27,971 P18285 INFO Train loss: 0.270222
-2022-04-12 16:16:27,971 P18285 INFO ************ Epoch=22 end ************
-2022-04-12 16:16:31,022 P18285 INFO [Metrics] AUC: 0.967133 - logloss: 0.209271
-2022-04-12 16:16:31,023 P18285 INFO Save best model: monitor(max): 0.967133
-2022-04-12 16:16:31,025 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:16:31,057 P18285 INFO Train loss: 0.267118
-2022-04-12 16:16:31,057 P18285 INFO ************ Epoch=23 end ************
-2022-04-12 16:16:34,084 P18285 INFO [Metrics] AUC: 0.967806 - logloss: 0.207393
-2022-04-12 16:16:34,085 P18285 INFO Save best model: monitor(max): 0.967806
-2022-04-12 16:16:34,088 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:16:34,129 P18285 INFO Train loss: 0.264162
-2022-04-12 16:16:34,129 P18285 INFO ************ Epoch=24 end ************
-2022-04-12 16:16:38,197 P18285 INFO [Metrics] AUC: 0.968835 - logloss: 0.204159
-2022-04-12 16:16:38,197 P18285 INFO Save best model: monitor(max): 0.968835
-2022-04-12 16:16:38,199 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:16:38,235 P18285 INFO Train loss: 0.260344
-2022-04-12 16:16:38,235 P18285 INFO ************ Epoch=25 end ************
-2022-04-12 16:16:43,134 P18285 INFO [Metrics] AUC: 0.970038 - logloss: 0.202954
-2022-04-12 16:16:43,135 P18285 INFO Save best model: monitor(max): 0.970038
-2022-04-12 16:16:43,138 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:16:43,179 P18285 INFO Train loss: 0.258273
-2022-04-12 16:16:43,179 P18285 INFO ************ Epoch=26 end ************
-2022-04-12 16:16:48,452 P18285 INFO [Metrics] AUC: 0.970495 - logloss: 0.198212
-2022-04-12 16:16:48,453 P18285 INFO Save best model: monitor(max): 0.970495
-2022-04-12 16:16:48,455 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:16:48,497 P18285 INFO Train loss: 0.255641
-2022-04-12 16:16:48,497 P18285 INFO ************ Epoch=27 end ************
-2022-04-12 16:16:53,830 P18285 INFO [Metrics] AUC: 0.971084 - logloss: 0.197439
-2022-04-12 16:16:53,831 P18285 INFO Save best model: monitor(max): 0.971084
-2022-04-12 16:16:53,835 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:16:53,875 P18285 INFO Train loss: 0.252049
-2022-04-12 16:16:53,876 P18285 INFO ************ Epoch=28 end ************
-2022-04-12 16:16:58,996 P18285 INFO [Metrics] AUC: 0.972599 - logloss: 0.193013
-2022-04-12 16:16:58,997 P18285 INFO Save best model: monitor(max): 0.972599
-2022-04-12 16:16:59,000 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:16:59,043 P18285 INFO Train loss: 0.248332
-2022-04-12 16:16:59,043 P18285 INFO ************ Epoch=29 end ************
-2022-04-12 16:17:04,163 P18285 INFO [Metrics] AUC: 0.972648 - logloss: 0.191178
-2022-04-12 16:17:04,164 P18285 INFO Save best model: monitor(max): 0.972648
-2022-04-12 16:17:04,166 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:17:04,205 P18285 INFO Train loss: 0.247627
-2022-04-12 16:17:04,205 P18285 INFO ************ Epoch=30 end ************
-2022-04-12 16:17:09,452 P18285 INFO [Metrics] AUC: 0.973083 - logloss: 0.189776
-2022-04-12 16:17:09,453 P18285 INFO Save best model: monitor(max): 0.973083
-2022-04-12 16:17:09,456 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:17:09,486 P18285 INFO Train loss: 0.243245
-2022-04-12 16:17:09,486 P18285 INFO ************ Epoch=31 end ************
-2022-04-12 16:17:14,788 P18285 INFO [Metrics] AUC: 0.973680 - logloss: 0.187303
-2022-04-12 16:17:14,789 P18285 INFO Save best model: monitor(max): 0.973680
-2022-04-12 16:17:14,792 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:17:14,823 P18285 INFO Train loss: 0.240660
-2022-04-12 16:17:14,823 P18285 INFO ************ Epoch=32 end ************
-2022-04-12 16:17:20,068 P18285 INFO [Metrics] AUC: 0.974265 - logloss: 0.185779
-2022-04-12 16:17:20,069 P18285 INFO Save best model: monitor(max): 0.974265
-2022-04-12 16:17:20,072 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:17:20,109 P18285 INFO Train loss: 0.238794
-2022-04-12 16:17:20,109 P18285 INFO ************ Epoch=33 end ************
-2022-04-12 16:17:25,452 P18285 INFO [Metrics] AUC: 0.974288 - logloss: 0.184853
-2022-04-12 16:17:25,453 P18285 INFO Save best model: monitor(max): 0.974288
-2022-04-12 16:17:25,456 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:17:25,497 P18285 INFO Train loss: 0.236212
-2022-04-12 16:17:25,498 P18285 INFO ************ Epoch=34 end ************
-2022-04-12 16:17:30,900 P18285 INFO [Metrics] AUC: 0.975132 - logloss: 0.181987
-2022-04-12 16:17:30,900 P18285 INFO Save best model: monitor(max): 0.975132
-2022-04-12 16:17:30,902 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:17:30,944 P18285 INFO Train loss: 0.233114
-2022-04-12 16:17:30,945 P18285 INFO ************ Epoch=35 end ************
-2022-04-12 16:17:36,469 P18285 INFO [Metrics] AUC: 0.975245 - logloss: 0.181657
-2022-04-12 16:17:36,469 P18285 INFO Save best model: monitor(max): 0.975245
-2022-04-12 16:17:36,473 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:17:36,543 P18285 INFO Train loss: 0.230050
-2022-04-12 16:17:36,543 P18285 INFO ************ Epoch=36 end ************
-2022-04-12 16:17:41,824 P18285 INFO [Metrics] AUC: 0.976012 - logloss: 0.178735
-2022-04-12 16:17:41,825 P18285 INFO Save best model: monitor(max): 0.976012
-2022-04-12 16:17:41,828 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:17:41,894 P18285 INFO Train loss: 0.226954
-2022-04-12 16:17:41,894 P18285 INFO ************ Epoch=37 end ************
-2022-04-12 16:17:45,207 P18285 INFO [Metrics] AUC: 0.976300 - logloss: 0.177349
-2022-04-12 16:17:45,208 P18285 INFO Save best model: monitor(max): 0.976300
-2022-04-12 16:17:45,211 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:17:45,248 P18285 INFO Train loss: 0.224711
-2022-04-12 16:17:45,249 P18285 INFO ************ Epoch=38 end ************
-2022-04-12 16:17:47,343 P18285 INFO [Metrics] AUC: 0.976999 - logloss: 0.176214
-2022-04-12 16:17:47,344 P18285 INFO Save best model: monitor(max): 0.976999
-2022-04-12 16:17:47,347 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:17:47,387 P18285 INFO Train loss: 0.222231
-2022-04-12 16:17:47,387 P18285 INFO ************ Epoch=39 end ************
-2022-04-12 16:17:49,455 P18285 INFO [Metrics] AUC: 0.976842 - logloss: 0.174751
-2022-04-12 16:17:49,455 P18285 INFO Monitor(max) STOP: 0.976842 !
-2022-04-12 16:17:49,455 P18285 INFO Reduce learning rate on plateau: 0.000100
-2022-04-12 16:17:49,456 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:17:49,496 P18285 INFO Train loss: 0.219488
-2022-04-12 16:17:49,497 P18285 INFO ************ Epoch=40 end ************
-2022-04-12 16:17:51,800 P18285 INFO [Metrics] AUC: 0.979293 - logloss: 0.168707
-2022-04-12 16:17:51,801 P18285 INFO Save best model: monitor(max): 0.979293
-2022-04-12 16:17:51,804 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:17:51,845 P18285 INFO Train loss: 0.184339
-2022-04-12 16:17:51,845 P18285 INFO ************ Epoch=41 end ************
-2022-04-12 16:17:56,284 P18285 INFO [Metrics] AUC: 0.980418 - logloss: 0.166857
-2022-04-12 16:17:56,285 P18285 INFO Save best model: monitor(max): 0.980418
-2022-04-12 16:17:56,289 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:17:56,333 P18285 INFO Train loss: 0.163555
-2022-04-12 16:17:56,334 P18285 INFO ************ Epoch=42 end ************
-2022-04-12 16:18:02,163 P18285 INFO [Metrics] AUC: 0.981001 - logloss: 0.166472
-2022-04-12 16:18:02,163 P18285 INFO Save best model: monitor(max): 0.981001
-2022-04-12 16:18:02,167 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:18:02,202 P18285 INFO Train loss: 0.151844
-2022-04-12 16:18:02,203 P18285 INFO ************ Epoch=43 end ************
-2022-04-12 16:18:08,128 P18285 INFO [Metrics] AUC: 0.981077 - logloss: 0.168015
-2022-04-12 16:18:08,128 P18285 INFO Save best model: monitor(max): 0.981077
-2022-04-12 16:18:08,132 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:18:08,181 P18285 INFO Train loss: 0.144898
-2022-04-12 16:18:08,181 P18285 INFO ************ Epoch=44 end ************
-2022-04-12 16:18:14,193 P18285 INFO [Metrics] AUC: 0.980999 - logloss: 0.171153
-2022-04-12 16:18:14,194 P18285 INFO Monitor(max) STOP: 0.980999 !
-2022-04-12 16:18:14,194 P18285 INFO Reduce learning rate on plateau: 0.000010
-2022-04-12 16:18:14,194 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:18:14,266 P18285 INFO Train loss: 0.139010
-2022-04-12 16:18:14,266 P18285 INFO ************ Epoch=45 end ************
-2022-04-12 16:18:20,463 P18285 INFO [Metrics] AUC: 0.981054 - logloss: 0.170943
-2022-04-12 16:18:20,464 P18285 INFO Monitor(max) STOP: 0.981054 !
-2022-04-12 16:18:20,464 P18285 INFO Reduce learning rate on plateau: 0.000001
-2022-04-12 16:18:20,464 P18285 INFO Early stopping at epoch=46
-2022-04-12 16:18:20,464 P18285 INFO --- 50/50 batches finished ---
-2022-04-12 16:18:20,543 P18285 INFO Train loss: 0.131743
-2022-04-12 16:18:20,544 P18285 INFO Training finished.
-2022-04-12 16:18:20,544 P18285 INFO Load best model: /home/XXX/FuxiCTR/benchmarks/Frappe/SAM_frappe_x1/frappe_x1_04e961e9/SAM_frappe_x1_005_1368acce.model
-2022-04-12 16:18:20,553 P18285 INFO ****** Validation evaluation ******
-2022-04-12 16:18:21,047 P18285 INFO [Metrics] AUC: 0.981077 - logloss: 0.168015
-2022-04-12 16:18:21,146 P18285 INFO ******** Test evaluation ********
-2022-04-12 16:18:21,147 P18285 INFO Loading data...
-2022-04-12 16:18:21,148 P18285 INFO Loading data from h5: ../data/Frappe/frappe_x1_04e961e9/test.h5
-2022-04-12 16:18:21,155 P18285 INFO Test samples: total/28860, pos/9536, neg/19324, ratio/33.04%, blocks/1
-2022-04-12 16:18:21,155 P18285 INFO Loading test data done.
-2022-04-12 16:18:21,403 P18285 INFO [Metrics] AUC: 0.980120 - logloss: 0.173521
-
-```
+## SAM_frappe_x1
+
+A hands-on guide to run the SAM model on the Frappe_x1 dataset.
+
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
+
+### Index
+[Environments](#environments) | [Dataset](#dataset) | [Code](#code) | [Results](#results) | [Logs](#logs)
+
+### Environments
++ Hardware
+
+ ```python
+ CPU: Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.6GHz
+ GPU: Tesla P100 16G
+ RAM: 755G
+
+ ```
+
++ Software
+
+ ```python
+ CUDA: 11.4
+ python: 3.6.5
+ pytorch: 1.0.1.post2
+ pandas: 0.23.0
+ numpy: 1.18.1
+ scipy: 1.1.0
+ sklearn: 0.23.1
+ pyyaml: 5.1
+ h5py: 2.7.1
+ tqdm: 4.59.0
+ fuxictr: 1.2.1
+ ```
+
+### Dataset
+Dataset ID: [Frappe_x1](https://github.com/reczoo/BARS/blob/main/ctr_prediction/datasets/Frappe#Frappe_x1). Please refer to the dataset details to get data ready.
+
+### Code
+
+We use [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [SAM](https://github.com/reczoo/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/SAM.py).
+
+Running steps:
+
+1. Download [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+
+ ```python
+ sys.path.append('YOUR_PATH_TO_FuxiCTR/')
+ ```
+
+2. Create a data directory and put the downloaded csv files in `../data/Frappe/Frappe_x1`.
+
+3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [SAM_frappe_x1_tuner_config_02](./SAM_frappe_x1_tuner_config_02). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
+
+4. Run the following script to start.
+
+ ```bash
+ cd SAM_frappe_x1
+ nohup python run_expid.py --config ./SAM_frappe_x1_tuner_config_02 --expid SAM_frappe_x1_005_1368acce --gpu 0 > run.log &
+ tail -f run.log
+ ```
+
+### Results
+
+| AUC | logloss |
+|:--------------------:|:--------------------:|
+| 0.980120 | 0.173521 |
+
+
+### Logs
+```python
+2022-04-12 16:14:27,112 P18285 INFO {
+ "aggregation": "concat",
+ "batch_size": "4096",
+ "data_format": "csv",
+ "data_root": "../data/Frappe/",
+ "dataset_id": "frappe_x1_04e961e9",
+ "debug": "False",
+ "embedding_dim": "10",
+ "embedding_regularizer": "0.05",
+ "epochs": "100",
+ "every_x_epochs": "1",
+ "feature_cols": "[{'active': True, 'dtype': 'float', 'name': ['user', 'item', 'daytime', 'weekday', 'isweekend', 'homework', 'cost', 'weather', 'country', 'city'], 'type': 'categorical'}]",
+ "gpu": "0",
+ "interaction_type": "SAM3A",
+ "label_col": "{'dtype': 'float', 'name': 'label'}",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "metrics": "['AUC', 'logloss']",
+ "min_categr_count": "1",
+ "model": "SAM",
+ "model_id": "SAM_frappe_x1_005_1368acce",
+ "model_root": "./Frappe/SAM_frappe_x1/",
+ "monitor": "AUC",
+ "monitor_mode": "max",
+ "net_dropout": "0.2",
+ "net_regularizer": "0",
+ "num_interaction_layers": "4",
+ "num_workers": "3",
+ "optimizer": "adam",
+ "patience": "2",
+ "pickle_feature_encoder": "True",
+ "save_best_only": "True",
+ "seed": "2021",
+ "shuffle": "True",
+ "task": "binary_classification",
+ "test_data": "../data/Frappe/Frappe_x1/test.csv",
+ "train_data": "../data/Frappe/Frappe_x1/train.csv",
+ "use_hdf5": "True",
+ "use_residual": "True",
+ "valid_data": "../data/Frappe/Frappe_x1/valid.csv",
+ "verbose": "1",
+ "version": "pytorch"
+}
+2022-04-12 16:14:27,112 P18285 INFO Set up feature encoder...
+2022-04-12 16:14:27,112 P18285 INFO Load feature_map from json: ../data/Frappe/frappe_x1_04e961e9/feature_map.json
+2022-04-12 16:14:27,113 P18285 INFO Loading data...
+2022-04-12 16:14:27,115 P18285 INFO Loading data from h5: ../data/Frappe/frappe_x1_04e961e9/train.h5
+2022-04-12 16:14:27,126 P18285 INFO Loading data from h5: ../data/Frappe/frappe_x1_04e961e9/valid.h5
+2022-04-12 16:14:27,130 P18285 INFO Train samples: total/202027, pos/67604, neg/134423, ratio/33.46%, blocks/1
+2022-04-12 16:14:27,130 P18285 INFO Validation samples: total/57722, pos/19063, neg/38659, ratio/33.03%, blocks/1
+2022-04-12 16:14:27,130 P18285 INFO Loading train data done.
+2022-04-12 16:14:31,290 P18285 INFO Total number of parameters: 58791.
+2022-04-12 16:14:31,291 P18285 INFO Start training: 50 batches/epoch
+2022-04-12 16:14:31,291 P18285 INFO ************ Epoch=1 start ************
+2022-04-12 16:14:37,006 P18285 INFO [Metrics] AUC: 0.916969 - logloss: 0.426452
+2022-04-12 16:14:37,007 P18285 INFO Save best model: monitor(max): 0.916969
+2022-04-12 16:14:37,010 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:14:37,072 P18285 INFO Train loss: 0.624258
+2022-04-12 16:14:37,073 P18285 INFO ************ Epoch=1 end ************
+2022-04-12 16:14:42,746 P18285 INFO [Metrics] AUC: 0.935634 - logloss: 0.293012
+2022-04-12 16:14:42,747 P18285 INFO Save best model: monitor(max): 0.935634
+2022-04-12 16:14:42,749 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:14:42,791 P18285 INFO Train loss: 0.436476
+2022-04-12 16:14:42,791 P18285 INFO ************ Epoch=2 end ************
+2022-04-12 16:14:48,305 P18285 INFO [Metrics] AUC: 0.935704 - logloss: 0.287920
+2022-04-12 16:14:48,305 P18285 INFO Save best model: monitor(max): 0.935704
+2022-04-12 16:14:48,309 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:14:48,354 P18285 INFO Train loss: 0.368875
+2022-04-12 16:14:48,354 P18285 INFO ************ Epoch=3 end ************
+2022-04-12 16:14:53,431 P18285 INFO [Metrics] AUC: 0.937183 - logloss: 0.286680
+2022-04-12 16:14:53,431 P18285 INFO Save best model: monitor(max): 0.937183
+2022-04-12 16:14:53,435 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:14:53,475 P18285 INFO Train loss: 0.352242
+2022-04-12 16:14:53,475 P18285 INFO ************ Epoch=4 end ************
+2022-04-12 16:14:58,179 P18285 INFO [Metrics] AUC: 0.937875 - logloss: 0.285015
+2022-04-12 16:14:58,180 P18285 INFO Save best model: monitor(max): 0.937875
+2022-04-12 16:14:58,184 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:14:58,224 P18285 INFO Train loss: 0.343246
+2022-04-12 16:14:58,225 P18285 INFO ************ Epoch=5 end ************
+2022-04-12 16:15:03,258 P18285 INFO [Metrics] AUC: 0.938140 - logloss: 0.284312
+2022-04-12 16:15:03,259 P18285 INFO Save best model: monitor(max): 0.938140
+2022-04-12 16:15:03,261 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:15:03,302 P18285 INFO Train loss: 0.338004
+2022-04-12 16:15:03,303 P18285 INFO ************ Epoch=6 end ************
+2022-04-12 16:15:08,347 P18285 INFO [Metrics] AUC: 0.939443 - logloss: 0.282059
+2022-04-12 16:15:08,349 P18285 INFO Save best model: monitor(max): 0.939443
+2022-04-12 16:15:08,352 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:15:08,394 P18285 INFO Train loss: 0.333321
+2022-04-12 16:15:08,394 P18285 INFO ************ Epoch=7 end ************
+2022-04-12 16:15:13,430 P18285 INFO [Metrics] AUC: 0.939695 - logloss: 0.282163
+2022-04-12 16:15:13,431 P18285 INFO Save best model: monitor(max): 0.939695
+2022-04-12 16:15:13,434 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:15:13,478 P18285 INFO Train loss: 0.328826
+2022-04-12 16:15:13,479 P18285 INFO ************ Epoch=8 end ************
+2022-04-12 16:15:18,641 P18285 INFO [Metrics] AUC: 0.941144 - logloss: 0.278907
+2022-04-12 16:15:18,642 P18285 INFO Save best model: monitor(max): 0.941144
+2022-04-12 16:15:18,658 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:15:18,700 P18285 INFO Train loss: 0.324083
+2022-04-12 16:15:18,700 P18285 INFO ************ Epoch=9 end ************
+2022-04-12 16:15:23,852 P18285 INFO [Metrics] AUC: 0.942824 - logloss: 0.275706
+2022-04-12 16:15:23,853 P18285 INFO Save best model: monitor(max): 0.942824
+2022-04-12 16:15:23,856 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:15:23,895 P18285 INFO Train loss: 0.320081
+2022-04-12 16:15:23,896 P18285 INFO ************ Epoch=10 end ************
+2022-04-12 16:15:29,052 P18285 INFO [Metrics] AUC: 0.944781 - logloss: 0.269588
+2022-04-12 16:15:29,053 P18285 INFO Save best model: monitor(max): 0.944781
+2022-04-12 16:15:29,056 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:15:29,097 P18285 INFO Train loss: 0.314938
+2022-04-12 16:15:29,097 P18285 INFO ************ Epoch=11 end ************
+2022-04-12 16:15:34,237 P18285 INFO [Metrics] AUC: 0.948028 - logloss: 0.261683
+2022-04-12 16:15:34,238 P18285 INFO Save best model: monitor(max): 0.948028
+2022-04-12 16:15:34,240 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:15:34,280 P18285 INFO Train loss: 0.308690
+2022-04-12 16:15:34,280 P18285 INFO ************ Epoch=12 end ************
+2022-04-12 16:15:39,589 P18285 INFO [Metrics] AUC: 0.948180 - logloss: 0.260188
+2022-04-12 16:15:39,590 P18285 INFO Save best model: monitor(max): 0.948180
+2022-04-12 16:15:39,593 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:15:39,645 P18285 INFO Train loss: 0.304371
+2022-04-12 16:15:39,645 P18285 INFO ************ Epoch=13 end ************
+2022-04-12 16:15:44,779 P18285 INFO [Metrics] AUC: 0.949529 - logloss: 0.257608
+2022-04-12 16:15:44,780 P18285 INFO Save best model: monitor(max): 0.949529
+2022-04-12 16:15:44,782 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:15:44,824 P18285 INFO Train loss: 0.300745
+2022-04-12 16:15:44,824 P18285 INFO ************ Epoch=14 end ************
+2022-04-12 16:15:50,175 P18285 INFO [Metrics] AUC: 0.951175 - logloss: 0.253271
+2022-04-12 16:15:50,176 P18285 INFO Save best model: monitor(max): 0.951175
+2022-04-12 16:15:50,179 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:15:50,224 P18285 INFO Train loss: 0.297171
+2022-04-12 16:15:50,224 P18285 INFO ************ Epoch=15 end ************
+2022-04-12 16:15:55,570 P18285 INFO [Metrics] AUC: 0.953267 - logloss: 0.249932
+2022-04-12 16:15:55,571 P18285 INFO Save best model: monitor(max): 0.953267
+2022-04-12 16:15:55,574 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:15:55,615 P18285 INFO Train loss: 0.293501
+2022-04-12 16:15:55,615 P18285 INFO ************ Epoch=16 end ************
+2022-04-12 16:16:00,781 P18285 INFO [Metrics] AUC: 0.956531 - logloss: 0.239244
+2022-04-12 16:16:00,782 P18285 INFO Save best model: monitor(max): 0.956531
+2022-04-12 16:16:00,785 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:16:00,821 P18285 INFO Train loss: 0.289278
+2022-04-12 16:16:00,821 P18285 INFO ************ Epoch=17 end ************
+2022-04-12 16:16:06,129 P18285 INFO [Metrics] AUC: 0.959463 - logloss: 0.231491
+2022-04-12 16:16:06,129 P18285 INFO Save best model: monitor(max): 0.959463
+2022-04-12 16:16:06,132 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:16:06,169 P18285 INFO Train loss: 0.284526
+2022-04-12 16:16:06,169 P18285 INFO ************ Epoch=18 end ************
+2022-04-12 16:16:11,534 P18285 INFO [Metrics] AUC: 0.959701 - logloss: 0.230186
+2022-04-12 16:16:11,535 P18285 INFO Save best model: monitor(max): 0.959701
+2022-04-12 16:16:11,539 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:16:11,578 P18285 INFO Train loss: 0.280906
+2022-04-12 16:16:11,578 P18285 INFO ************ Epoch=19 end ************
+2022-04-12 16:16:16,912 P18285 INFO [Metrics] AUC: 0.963088 - logloss: 0.221950
+2022-04-12 16:16:16,912 P18285 INFO Save best model: monitor(max): 0.963088
+2022-04-12 16:16:16,914 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:16:16,948 P18285 INFO Train loss: 0.277038
+2022-04-12 16:16:16,948 P18285 INFO ************ Epoch=20 end ************
+2022-04-12 16:16:22,506 P18285 INFO [Metrics] AUC: 0.963855 - logloss: 0.219043
+2022-04-12 16:16:22,506 P18285 INFO Save best model: monitor(max): 0.963855
+2022-04-12 16:16:22,509 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:16:22,558 P18285 INFO Train loss: 0.272575
+2022-04-12 16:16:22,558 P18285 INFO ************ Epoch=21 end ************
+2022-04-12 16:16:27,927 P18285 INFO [Metrics] AUC: 0.965482 - logloss: 0.214416
+2022-04-12 16:16:27,927 P18285 INFO Save best model: monitor(max): 0.965482
+2022-04-12 16:16:27,931 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:16:27,971 P18285 INFO Train loss: 0.270222
+2022-04-12 16:16:27,971 P18285 INFO ************ Epoch=22 end ************
+2022-04-12 16:16:31,022 P18285 INFO [Metrics] AUC: 0.967133 - logloss: 0.209271
+2022-04-12 16:16:31,023 P18285 INFO Save best model: monitor(max): 0.967133
+2022-04-12 16:16:31,025 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:16:31,057 P18285 INFO Train loss: 0.267118
+2022-04-12 16:16:31,057 P18285 INFO ************ Epoch=23 end ************
+2022-04-12 16:16:34,084 P18285 INFO [Metrics] AUC: 0.967806 - logloss: 0.207393
+2022-04-12 16:16:34,085 P18285 INFO Save best model: monitor(max): 0.967806
+2022-04-12 16:16:34,088 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:16:34,129 P18285 INFO Train loss: 0.264162
+2022-04-12 16:16:34,129 P18285 INFO ************ Epoch=24 end ************
+2022-04-12 16:16:38,197 P18285 INFO [Metrics] AUC: 0.968835 - logloss: 0.204159
+2022-04-12 16:16:38,197 P18285 INFO Save best model: monitor(max): 0.968835
+2022-04-12 16:16:38,199 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:16:38,235 P18285 INFO Train loss: 0.260344
+2022-04-12 16:16:38,235 P18285 INFO ************ Epoch=25 end ************
+2022-04-12 16:16:43,134 P18285 INFO [Metrics] AUC: 0.970038 - logloss: 0.202954
+2022-04-12 16:16:43,135 P18285 INFO Save best model: monitor(max): 0.970038
+2022-04-12 16:16:43,138 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:16:43,179 P18285 INFO Train loss: 0.258273
+2022-04-12 16:16:43,179 P18285 INFO ************ Epoch=26 end ************
+2022-04-12 16:16:48,452 P18285 INFO [Metrics] AUC: 0.970495 - logloss: 0.198212
+2022-04-12 16:16:48,453 P18285 INFO Save best model: monitor(max): 0.970495
+2022-04-12 16:16:48,455 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:16:48,497 P18285 INFO Train loss: 0.255641
+2022-04-12 16:16:48,497 P18285 INFO ************ Epoch=27 end ************
+2022-04-12 16:16:53,830 P18285 INFO [Metrics] AUC: 0.971084 - logloss: 0.197439
+2022-04-12 16:16:53,831 P18285 INFO Save best model: monitor(max): 0.971084
+2022-04-12 16:16:53,835 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:16:53,875 P18285 INFO Train loss: 0.252049
+2022-04-12 16:16:53,876 P18285 INFO ************ Epoch=28 end ************
+2022-04-12 16:16:58,996 P18285 INFO [Metrics] AUC: 0.972599 - logloss: 0.193013
+2022-04-12 16:16:58,997 P18285 INFO Save best model: monitor(max): 0.972599
+2022-04-12 16:16:59,000 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:16:59,043 P18285 INFO Train loss: 0.248332
+2022-04-12 16:16:59,043 P18285 INFO ************ Epoch=29 end ************
+2022-04-12 16:17:04,163 P18285 INFO [Metrics] AUC: 0.972648 - logloss: 0.191178
+2022-04-12 16:17:04,164 P18285 INFO Save best model: monitor(max): 0.972648
+2022-04-12 16:17:04,166 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:17:04,205 P18285 INFO Train loss: 0.247627
+2022-04-12 16:17:04,205 P18285 INFO ************ Epoch=30 end ************
+2022-04-12 16:17:09,452 P18285 INFO [Metrics] AUC: 0.973083 - logloss: 0.189776
+2022-04-12 16:17:09,453 P18285 INFO Save best model: monitor(max): 0.973083
+2022-04-12 16:17:09,456 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:17:09,486 P18285 INFO Train loss: 0.243245
+2022-04-12 16:17:09,486 P18285 INFO ************ Epoch=31 end ************
+2022-04-12 16:17:14,788 P18285 INFO [Metrics] AUC: 0.973680 - logloss: 0.187303
+2022-04-12 16:17:14,789 P18285 INFO Save best model: monitor(max): 0.973680
+2022-04-12 16:17:14,792 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:17:14,823 P18285 INFO Train loss: 0.240660
+2022-04-12 16:17:14,823 P18285 INFO ************ Epoch=32 end ************
+2022-04-12 16:17:20,068 P18285 INFO [Metrics] AUC: 0.974265 - logloss: 0.185779
+2022-04-12 16:17:20,069 P18285 INFO Save best model: monitor(max): 0.974265
+2022-04-12 16:17:20,072 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:17:20,109 P18285 INFO Train loss: 0.238794
+2022-04-12 16:17:20,109 P18285 INFO ************ Epoch=33 end ************
+2022-04-12 16:17:25,452 P18285 INFO [Metrics] AUC: 0.974288 - logloss: 0.184853
+2022-04-12 16:17:25,453 P18285 INFO Save best model: monitor(max): 0.974288
+2022-04-12 16:17:25,456 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:17:25,497 P18285 INFO Train loss: 0.236212
+2022-04-12 16:17:25,498 P18285 INFO ************ Epoch=34 end ************
+2022-04-12 16:17:30,900 P18285 INFO [Metrics] AUC: 0.975132 - logloss: 0.181987
+2022-04-12 16:17:30,900 P18285 INFO Save best model: monitor(max): 0.975132
+2022-04-12 16:17:30,902 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:17:30,944 P18285 INFO Train loss: 0.233114
+2022-04-12 16:17:30,945 P18285 INFO ************ Epoch=35 end ************
+2022-04-12 16:17:36,469 P18285 INFO [Metrics] AUC: 0.975245 - logloss: 0.181657
+2022-04-12 16:17:36,469 P18285 INFO Save best model: monitor(max): 0.975245
+2022-04-12 16:17:36,473 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:17:36,543 P18285 INFO Train loss: 0.230050
+2022-04-12 16:17:36,543 P18285 INFO ************ Epoch=36 end ************
+2022-04-12 16:17:41,824 P18285 INFO [Metrics] AUC: 0.976012 - logloss: 0.178735
+2022-04-12 16:17:41,825 P18285 INFO Save best model: monitor(max): 0.976012
+2022-04-12 16:17:41,828 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:17:41,894 P18285 INFO Train loss: 0.226954
+2022-04-12 16:17:41,894 P18285 INFO ************ Epoch=37 end ************
+2022-04-12 16:17:45,207 P18285 INFO [Metrics] AUC: 0.976300 - logloss: 0.177349
+2022-04-12 16:17:45,208 P18285 INFO Save best model: monitor(max): 0.976300
+2022-04-12 16:17:45,211 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:17:45,248 P18285 INFO Train loss: 0.224711
+2022-04-12 16:17:45,249 P18285 INFO ************ Epoch=38 end ************
+2022-04-12 16:17:47,343 P18285 INFO [Metrics] AUC: 0.976999 - logloss: 0.176214
+2022-04-12 16:17:47,344 P18285 INFO Save best model: monitor(max): 0.976999
+2022-04-12 16:17:47,347 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:17:47,387 P18285 INFO Train loss: 0.222231
+2022-04-12 16:17:47,387 P18285 INFO ************ Epoch=39 end ************
+2022-04-12 16:17:49,455 P18285 INFO [Metrics] AUC: 0.976842 - logloss: 0.174751
+2022-04-12 16:17:49,455 P18285 INFO Monitor(max) STOP: 0.976842 !
+2022-04-12 16:17:49,455 P18285 INFO Reduce learning rate on plateau: 0.000100
+2022-04-12 16:17:49,456 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:17:49,496 P18285 INFO Train loss: 0.219488
+2022-04-12 16:17:49,497 P18285 INFO ************ Epoch=40 end ************
+2022-04-12 16:17:51,800 P18285 INFO [Metrics] AUC: 0.979293 - logloss: 0.168707
+2022-04-12 16:17:51,801 P18285 INFO Save best model: monitor(max): 0.979293
+2022-04-12 16:17:51,804 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:17:51,845 P18285 INFO Train loss: 0.184339
+2022-04-12 16:17:51,845 P18285 INFO ************ Epoch=41 end ************
+2022-04-12 16:17:56,284 P18285 INFO [Metrics] AUC: 0.980418 - logloss: 0.166857
+2022-04-12 16:17:56,285 P18285 INFO Save best model: monitor(max): 0.980418
+2022-04-12 16:17:56,289 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:17:56,333 P18285 INFO Train loss: 0.163555
+2022-04-12 16:17:56,334 P18285 INFO ************ Epoch=42 end ************
+2022-04-12 16:18:02,163 P18285 INFO [Metrics] AUC: 0.981001 - logloss: 0.166472
+2022-04-12 16:18:02,163 P18285 INFO Save best model: monitor(max): 0.981001
+2022-04-12 16:18:02,167 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:18:02,202 P18285 INFO Train loss: 0.151844
+2022-04-12 16:18:02,203 P18285 INFO ************ Epoch=43 end ************
+2022-04-12 16:18:08,128 P18285 INFO [Metrics] AUC: 0.981077 - logloss: 0.168015
+2022-04-12 16:18:08,128 P18285 INFO Save best model: monitor(max): 0.981077
+2022-04-12 16:18:08,132 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:18:08,181 P18285 INFO Train loss: 0.144898
+2022-04-12 16:18:08,181 P18285 INFO ************ Epoch=44 end ************
+2022-04-12 16:18:14,193 P18285 INFO [Metrics] AUC: 0.980999 - logloss: 0.171153
+2022-04-12 16:18:14,194 P18285 INFO Monitor(max) STOP: 0.980999 !
+2022-04-12 16:18:14,194 P18285 INFO Reduce learning rate on plateau: 0.000010
+2022-04-12 16:18:14,194 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:18:14,266 P18285 INFO Train loss: 0.139010
+2022-04-12 16:18:14,266 P18285 INFO ************ Epoch=45 end ************
+2022-04-12 16:18:20,463 P18285 INFO [Metrics] AUC: 0.981054 - logloss: 0.170943
+2022-04-12 16:18:20,464 P18285 INFO Monitor(max) STOP: 0.981054 !
+2022-04-12 16:18:20,464 P18285 INFO Reduce learning rate on plateau: 0.000001
+2022-04-12 16:18:20,464 P18285 INFO Early stopping at epoch=46
+2022-04-12 16:18:20,464 P18285 INFO --- 50/50 batches finished ---
+2022-04-12 16:18:20,543 P18285 INFO Train loss: 0.131743
+2022-04-12 16:18:20,544 P18285 INFO Training finished.
+2022-04-12 16:18:20,544 P18285 INFO Load best model: /home/XXX/FuxiCTR/benchmarks/Frappe/SAM_frappe_x1/frappe_x1_04e961e9/SAM_frappe_x1_005_1368acce.model
+2022-04-12 16:18:20,553 P18285 INFO ****** Validation evaluation ******
+2022-04-12 16:18:21,047 P18285 INFO [Metrics] AUC: 0.981077 - logloss: 0.168015
+2022-04-12 16:18:21,146 P18285 INFO ******** Test evaluation ********
+2022-04-12 16:18:21,147 P18285 INFO Loading data...
+2022-04-12 16:18:21,148 P18285 INFO Loading data from h5: ../data/Frappe/frappe_x1_04e961e9/test.h5
+2022-04-12 16:18:21,155 P18285 INFO Test samples: total/28860, pos/9536, neg/19324, ratio/33.04%, blocks/1
+2022-04-12 16:18:21,155 P18285 INFO Loading test data done.
+2022-04-12 16:18:21,403 P18285 INFO [Metrics] AUC: 0.980120 - logloss: 0.173521
+
+```
diff --git a/ranking/ctr/SAM/SAM_movielenslatest_x1/README.md b/ranking/ctr/SAM/SAM_movielenslatest_x1/README.md
index eb469f0a..2ce3af52 100644
--- a/ranking/ctr/SAM/SAM_movielenslatest_x1/README.md
+++ b/ranking/ctr/SAM/SAM_movielenslatest_x1/README.md
@@ -1,255 +1,255 @@
-## SAM_movielenslatest_x1
-
-A hands-on guide to run the SAM model on the MovielensLatest_x1 dataset.
-
-Author: [XUEPAI](https://github.com/xue-pai)
-
-### Index
-[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
-
-### Environments
-+ Hardware
-
- ```python
- CPU: Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.6GHz
- GPU: Tesla P100 16G
- RAM: 755G
-
- ```
-
-+ Software
-
- ```python
- CUDA: 11.4
- python: 3.6.5
- pytorch: 1.0.1.post2
- pandas: 0.23.0
- numpy: 1.18.1
- scipy: 1.1.0
- sklearn: 0.23.1
- pyyaml: 5.1
- h5py: 2.7.1
- tqdm: 4.59.0
- fuxictr: 1.2.1
- ```
-
-### Dataset
-Dataset ID: [MovielensLatest_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/MovieLens#MovielensLatest_x1). Please refer to the dataset details to get data ready.
-
-### Code
-
-We use [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [SAM](https://github.com/xue-pai/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/SAM.py).
-
-Running steps:
-
-1. Download [FuxiCTR-v1.2.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
-
- ```python
- sys.path.append('YOUR_PATH_TO_FuxiCTR/')
- ```
-
-2. Create a data directory and put the downloaded csv files in `../data/Movielens/MovielensLatest_x1`.
-
-3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [SAM_movielenslatest_x1_tuner_config_06](./SAM_movielenslatest_x1_tuner_config_06). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
-
-4. Run the following script to start.
-
- ```bash
- cd SAM_movielenslatest_x1
- nohup python run_expid.py --config ./SAM_movielenslatest_x1_tuner_config_06 --expid SAM_movielenslatest_x1_013_68a6bc8b --gpu 0 > run.log &
- tail -f run.log
- ```
-
-### Results
-
-| AUC | logloss |
-|:--------------------:|:--------------------:|
-| 0.963104 | 0.266696 |
-
-
-### Logs
-```python
-2022-04-12 16:54:38,091 P12228 INFO {
- "aggregation": "weighted_pooling",
- "batch_size": "4096",
- "data_format": "csv",
- "data_root": "../data/Movielens/",
- "dataset_id": "movielenslatest_x1_cd32d937",
- "debug": "False",
- "embedding_dim": "10",
- "embedding_regularizer": "0.001",
- "epochs": "100",
- "every_x_epochs": "1",
- "feature_cols": "[{'active': True, 'dtype': 'float', 'name': ['user_id', 'item_id', 'tag_id'], 'type': 'categorical'}]",
- "gpu": "1",
- "interaction_type": "SAM3A",
- "label_col": "{'dtype': 'float', 'name': 'label'}",
- "learning_rate": "0.001",
- "loss": "binary_crossentropy",
- "metrics": "['AUC', 'logloss']",
- "min_categr_count": "1",
- "model": "SAM",
- "model_id": "SAM_movielenslatest_x1_013_68a6bc8b",
- "model_root": "./Movielens/SAM_movielenslatest_x1/",
- "monitor": "AUC",
- "monitor_mode": "max",
- "net_dropout": "0",
- "net_regularizer": "0",
- "num_interaction_layers": "2",
- "num_workers": "3",
- "optimizer": "adam",
- "patience": "2",
- "pickle_feature_encoder": "True",
- "save_best_only": "True",
- "seed": "2021",
- "shuffle": "True",
- "task": "binary_classification",
- "test_data": "../data/Movielens/MovielensLatest_x1/test.csv",
- "train_data": "../data/Movielens/MovielensLatest_x1/train.csv",
- "use_hdf5": "True",
- "use_residual": "True",
- "valid_data": "../data/Movielens/MovielensLatest_x1/valid.csv",
- "verbose": "1",
- "version": "pytorch"
-}
-2022-04-12 16:54:38,092 P12228 INFO Set up feature encoder...
-2022-04-12 16:54:38,092 P12228 INFO Load feature_map from json: ../data/Movielens/movielenslatest_x1_cd32d937/feature_map.json
-2022-04-12 16:54:38,092 P12228 INFO Loading data...
-2022-04-12 16:54:38,095 P12228 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_cd32d937/train.h5
-2022-04-12 16:54:38,123 P12228 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_cd32d937/valid.h5
-2022-04-12 16:54:38,132 P12228 INFO Train samples: total/1404801, pos/467878, neg/936923, ratio/33.31%, blocks/1
-2022-04-12 16:54:38,133 P12228 INFO Validation samples: total/401372, pos/134225, neg/267147, ratio/33.44%, blocks/1
-2022-04-12 16:54:38,133 P12228 INFO Loading train data done.
-2022-04-12 16:54:41,830 P12228 INFO Total number of parameters: 902984.
-2022-04-12 16:54:41,830 P12228 INFO Start training: 343 batches/epoch
-2022-04-12 16:54:41,831 P12228 INFO ************ Epoch=1 start ************
-2022-04-12 16:54:51,666 P12228 INFO [Metrics] AUC: 0.930376 - logloss: 0.301244
-2022-04-12 16:54:51,667 P12228 INFO Save best model: monitor(max): 0.930376
-2022-04-12 16:54:51,673 P12228 INFO --- 343/343 batches finished ---
-2022-04-12 16:54:51,749 P12228 INFO Train loss: 0.427487
-2022-04-12 16:54:51,750 P12228 INFO ************ Epoch=1 end ************
-2022-04-12 16:55:00,419 P12228 INFO [Metrics] AUC: 0.933289 - logloss: 0.296415
-2022-04-12 16:55:00,419 P12228 INFO Save best model: monitor(max): 0.933289
-2022-04-12 16:55:00,426 P12228 INFO --- 343/343 batches finished ---
-2022-04-12 16:55:00,541 P12228 INFO Train loss: 0.334632
-2022-04-12 16:55:00,541 P12228 INFO ************ Epoch=2 end ************
-2022-04-12 16:55:08,977 P12228 INFO [Metrics] AUC: 0.937246 - logloss: 0.287503
-2022-04-12 16:55:08,977 P12228 INFO Save best model: monitor(max): 0.937246
-2022-04-12 16:55:08,985 P12228 INFO --- 343/343 batches finished ---
-2022-04-12 16:55:09,047 P12228 INFO Train loss: 0.321591
-2022-04-12 16:55:09,047 P12228 INFO ************ Epoch=3 end ************
-2022-04-12 16:55:18,340 P12228 INFO [Metrics] AUC: 0.940396 - logloss: 0.279409
-2022-04-12 16:55:18,340 P12228 INFO Save best model: monitor(max): 0.940396
-2022-04-12 16:55:18,348 P12228 INFO --- 343/343 batches finished ---
-2022-04-12 16:55:18,401 P12228 INFO Train loss: 0.314695
-2022-04-12 16:55:18,401 P12228 INFO ************ Epoch=4 end ************
-2022-04-12 16:55:27,779 P12228 INFO [Metrics] AUC: 0.942787 - logloss: 0.273496
-2022-04-12 16:55:27,780 P12228 INFO Save best model: monitor(max): 0.942787
-2022-04-12 16:55:27,787 P12228 INFO --- 343/343 batches finished ---
-2022-04-12 16:55:27,856 P12228 INFO Train loss: 0.306847
-2022-04-12 16:55:27,856 P12228 INFO ************ Epoch=5 end ************
-2022-04-12 16:55:37,908 P12228 INFO [Metrics] AUC: 0.944759 - logloss: 0.269057
-2022-04-12 16:55:37,909 P12228 INFO Save best model: monitor(max): 0.944759
-2022-04-12 16:55:37,920 P12228 INFO --- 343/343 batches finished ---
-2022-04-12 16:55:37,978 P12228 INFO Train loss: 0.299294
-2022-04-12 16:55:37,978 P12228 INFO ************ Epoch=6 end ************
-2022-04-12 16:55:47,691 P12228 INFO [Metrics] AUC: 0.946652 - logloss: 0.264696
-2022-04-12 16:55:47,692 P12228 INFO Save best model: monitor(max): 0.946652
-2022-04-12 16:55:47,700 P12228 INFO --- 343/343 batches finished ---
-2022-04-12 16:55:47,758 P12228 INFO Train loss: 0.291928
-2022-04-12 16:55:47,758 P12228 INFO ************ Epoch=7 end ************
-2022-04-12 16:55:57,385 P12228 INFO [Metrics] AUC: 0.949001 - logloss: 0.258957
-2022-04-12 16:55:57,386 P12228 INFO Save best model: monitor(max): 0.949001
-2022-04-12 16:55:57,393 P12228 INFO --- 343/343 batches finished ---
-2022-04-12 16:55:57,467 P12228 INFO Train loss: 0.284887
-2022-04-12 16:55:57,467 P12228 INFO ************ Epoch=8 end ************
-2022-04-12 16:56:06,608 P12228 INFO [Metrics] AUC: 0.951515 - logloss: 0.253123
-2022-04-12 16:56:06,609 P12228 INFO Save best model: monitor(max): 0.951515
-2022-04-12 16:56:06,617 P12228 INFO --- 343/343 batches finished ---
-2022-04-12 16:56:06,675 P12228 INFO Train loss: 0.277926
-2022-04-12 16:56:06,675 P12228 INFO ************ Epoch=9 end ************
-2022-04-12 16:56:16,250 P12228 INFO [Metrics] AUC: 0.953628 - logloss: 0.246790
-2022-04-12 16:56:16,251 P12228 INFO Save best model: monitor(max): 0.953628
-2022-04-12 16:56:16,258 P12228 INFO --- 343/343 batches finished ---
-2022-04-12 16:56:16,312 P12228 INFO Train loss: 0.271264
-2022-04-12 16:56:16,312 P12228 INFO ************ Epoch=10 end ************
-2022-04-12 16:56:25,827 P12228 INFO [Metrics] AUC: 0.955708 - logloss: 0.241655
-2022-04-12 16:56:25,828 P12228 INFO Save best model: monitor(max): 0.955708
-2022-04-12 16:56:25,835 P12228 INFO --- 343/343 batches finished ---
-2022-04-12 16:56:25,901 P12228 INFO Train loss: 0.264930
-2022-04-12 16:56:25,901 P12228 INFO ************ Epoch=11 end ************
-2022-04-12 16:56:35,385 P12228 INFO [Metrics] AUC: 0.957115 - logloss: 0.239097
-2022-04-12 16:56:35,385 P12228 INFO Save best model: monitor(max): 0.957115
-2022-04-12 16:56:35,392 P12228 INFO --- 343/343 batches finished ---
-2022-04-12 16:56:35,459 P12228 INFO Train loss: 0.258424
-2022-04-12 16:56:35,460 P12228 INFO ************ Epoch=12 end ************
-2022-04-12 16:56:44,696 P12228 INFO [Metrics] AUC: 0.958158 - logloss: 0.237206
-2022-04-12 16:56:44,697 P12228 INFO Save best model: monitor(max): 0.958158
-2022-04-12 16:56:44,704 P12228 INFO --- 343/343 batches finished ---
-2022-04-12 16:56:44,754 P12228 INFO Train loss: 0.250939
-2022-04-12 16:56:44,754 P12228 INFO ************ Epoch=13 end ************
-2022-04-12 16:56:54,347 P12228 INFO [Metrics] AUC: 0.958425 - logloss: 0.238402
-2022-04-12 16:56:54,348 P12228 INFO Save best model: monitor(max): 0.958425
-2022-04-12 16:56:54,355 P12228 INFO --- 343/343 batches finished ---
-2022-04-12 16:56:54,427 P12228 INFO Train loss: 0.242757
-2022-04-12 16:56:54,427 P12228 INFO ************ Epoch=14 end ************
-2022-04-12 16:57:04,075 P12228 INFO [Metrics] AUC: 0.958673 - logloss: 0.240255
-2022-04-12 16:57:04,076 P12228 INFO Save best model: monitor(max): 0.958673
-2022-04-12 16:57:04,083 P12228 INFO --- 343/343 batches finished ---
-2022-04-12 16:57:04,148 P12228 INFO Train loss: 0.236126
-2022-04-12 16:57:04,149 P12228 INFO ************ Epoch=15 end ************
-2022-04-12 16:57:13,373 P12228 INFO [Metrics] AUC: 0.958731 - logloss: 0.243396
-2022-04-12 16:57:13,374 P12228 INFO Save best model: monitor(max): 0.958731
-2022-04-12 16:57:13,380 P12228 INFO --- 343/343 batches finished ---
-2022-04-12 16:57:13,433 P12228 INFO Train loss: 0.229093
-2022-04-12 16:57:13,433 P12228 INFO ************ Epoch=16 end ************
-2022-04-12 16:57:22,435 P12228 INFO [Metrics] AUC: 0.958769 - logloss: 0.247040
-2022-04-12 16:57:22,436 P12228 INFO Save best model: monitor(max): 0.958769
-2022-04-12 16:57:22,443 P12228 INFO --- 343/343 batches finished ---
-2022-04-12 16:57:22,501 P12228 INFO Train loss: 0.223994
-2022-04-12 16:57:22,501 P12228 INFO ************ Epoch=17 end ************
-2022-04-12 16:57:31,730 P12228 INFO [Metrics] AUC: 0.958379 - logloss: 0.250961
-2022-04-12 16:57:31,731 P12228 INFO Monitor(max) STOP: 0.958379 !
-2022-04-12 16:57:31,731 P12228 INFO Reduce learning rate on plateau: 0.000100
-2022-04-12 16:57:31,731 P12228 INFO --- 343/343 batches finished ---
-2022-04-12 16:57:31,796 P12228 INFO Train loss: 0.218576
-2022-04-12 16:57:31,796 P12228 INFO ************ Epoch=18 end ************
-2022-04-12 16:57:41,114 P12228 INFO [Metrics] AUC: 0.961894 - logloss: 0.250288
-2022-04-12 16:57:41,115 P12228 INFO Save best model: monitor(max): 0.961894
-2022-04-12 16:57:41,122 P12228 INFO --- 343/343 batches finished ---
-2022-04-12 16:57:41,184 P12228 INFO Train loss: 0.172328
-2022-04-12 16:57:41,184 P12228 INFO ************ Epoch=19 end ************
-2022-04-12 16:57:50,463 P12228 INFO [Metrics] AUC: 0.962941 - logloss: 0.257173
-2022-04-12 16:57:50,464 P12228 INFO Save best model: monitor(max): 0.962941
-2022-04-12 16:57:50,471 P12228 INFO --- 343/343 batches finished ---
-2022-04-12 16:57:50,545 P12228 INFO Train loss: 0.141895
-2022-04-12 16:57:50,546 P12228 INFO ************ Epoch=20 end ************
-2022-04-12 16:57:59,113 P12228 INFO [Metrics] AUC: 0.963267 - logloss: 0.266475
-2022-04-12 16:57:59,113 P12228 INFO Save best model: monitor(max): 0.963267
-2022-04-12 16:57:59,120 P12228 INFO --- 343/343 batches finished ---
-2022-04-12 16:57:59,182 P12228 INFO Train loss: 0.123147
-2022-04-12 16:57:59,182 P12228 INFO ************ Epoch=21 end ************
-2022-04-12 16:58:07,262 P12228 INFO [Metrics] AUC: 0.963191 - logloss: 0.276652
-2022-04-12 16:58:07,263 P12228 INFO Monitor(max) STOP: 0.963191 !
-2022-04-12 16:58:07,263 P12228 INFO Reduce learning rate on plateau: 0.000010
-2022-04-12 16:58:07,263 P12228 INFO --- 343/343 batches finished ---
-2022-04-12 16:58:07,315 P12228 INFO Train loss: 0.109783
-2022-04-12 16:58:07,316 P12228 INFO ************ Epoch=22 end ************
-2022-04-12 16:58:15,280 P12228 INFO [Metrics] AUC: 0.963187 - logloss: 0.277999
-2022-04-12 16:58:15,280 P12228 INFO Monitor(max) STOP: 0.963187 !
-2022-04-12 16:58:15,281 P12228 INFO Reduce learning rate on plateau: 0.000001
-2022-04-12 16:58:15,281 P12228 INFO Early stopping at epoch=23
-2022-04-12 16:58:15,281 P12228 INFO --- 343/343 batches finished ---
-2022-04-12 16:58:15,334 P12228 INFO Train loss: 0.098557
-2022-04-12 16:58:15,334 P12228 INFO Training finished.
-2022-04-12 16:58:15,334 P12228 INFO Load best model: /home/XXX/FuxiCTR/benchmarks/Movielens/SAM_movielenslatest_x1/movielenslatest_x1_cd32d937/SAM_movielenslatest_x1_013_68a6bc8b.model
-2022-04-12 16:58:18,900 P12228 INFO ****** Validation evaluation ******
-2022-04-12 16:58:21,366 P12228 INFO [Metrics] AUC: 0.963267 - logloss: 0.266475
-2022-04-12 16:58:21,444 P12228 INFO ******** Test evaluation ********
-2022-04-12 16:58:21,445 P12228 INFO Loading data...
-2022-04-12 16:58:21,445 P12228 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_cd32d937/test.h5
-2022-04-12 16:58:21,451 P12228 INFO Test samples: total/200686, pos/66850, neg/133836, ratio/33.31%, blocks/1
-2022-04-12 16:58:21,452 P12228 INFO Loading test data done.
-2022-04-12 16:58:22,400 P12228 INFO [Metrics] AUC: 0.963104 - logloss: 0.266696
-
-```
+## SAM_movielenslatest_x1
+
+A hands-on guide to run the SAM model on the MovielensLatest_x1 dataset.
+
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
+
+### Index
+[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
+
+### Environments
++ Hardware
+
+ ```python
+ CPU: Intel(R) Xeon(R) CPU E5-2690 v4 @ 2.6GHz
+ GPU: Tesla P100 16G
+ RAM: 755G
+
+ ```
+
++ Software
+
+ ```python
+ CUDA: 11.4
+ python: 3.6.5
+ pytorch: 1.0.1.post2
+ pandas: 0.23.0
+ numpy: 1.18.1
+ scipy: 1.1.0
+ sklearn: 0.23.1
+ pyyaml: 5.1
+ h5py: 2.7.1
+ tqdm: 4.59.0
+ fuxictr: 1.2.1
+ ```
+
+### Dataset
+Dataset ID: [MovielensLatest_x1](https://github.com/reczoo/Datasets/tree/main/MovieLens/MovielensLatest_x1). Please refer to the dataset details to get data ready.
+
+### Code
+
+We use [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/tree/v1.2.1) for this experiment. See the model code: [SAM](https://github.com/reczoo/FuxiCTR/blob/v1.2.1/fuxictr/pytorch/models/SAM.py).
+
+Running steps:
+
+1. Download [FuxiCTR-v1.2.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.2.1.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+
+ ```python
+ sys.path.append('YOUR_PATH_TO_FuxiCTR/')
+ ```
+
+2. Create a data directory and put the downloaded csv files in `../data/Movielens/MovielensLatest_x1`.
+
+3. Both `dataset_config.yaml` and `model_config.yaml` files are available in [SAM_movielenslatest_x1_tuner_config_06](./SAM_movielenslatest_x1_tuner_config_06). Make sure the data paths in `dataset_config.yaml` are correctly set to what we create in the last step.
+
+4. Run the following script to start.
+
+ ```bash
+ cd SAM_movielenslatest_x1
+ nohup python run_expid.py --config ./SAM_movielenslatest_x1_tuner_config_06 --expid SAM_movielenslatest_x1_013_68a6bc8b --gpu 0 > run.log &
+ tail -f run.log
+ ```
+
+### Results
+
+| AUC | logloss |
+|:--------------------:|:--------------------:|
+| 0.963104 | 0.266696 |
+
+
+### Logs
+```python
+2022-04-12 16:54:38,091 P12228 INFO {
+ "aggregation": "weighted_pooling",
+ "batch_size": "4096",
+ "data_format": "csv",
+ "data_root": "../data/Movielens/",
+ "dataset_id": "movielenslatest_x1_cd32d937",
+ "debug": "False",
+ "embedding_dim": "10",
+ "embedding_regularizer": "0.001",
+ "epochs": "100",
+ "every_x_epochs": "1",
+ "feature_cols": "[{'active': True, 'dtype': 'float', 'name': ['user_id', 'item_id', 'tag_id'], 'type': 'categorical'}]",
+ "gpu": "1",
+ "interaction_type": "SAM3A",
+ "label_col": "{'dtype': 'float', 'name': 'label'}",
+ "learning_rate": "0.001",
+ "loss": "binary_crossentropy",
+ "metrics": "['AUC', 'logloss']",
+ "min_categr_count": "1",
+ "model": "SAM",
+ "model_id": "SAM_movielenslatest_x1_013_68a6bc8b",
+ "model_root": "./Movielens/SAM_movielenslatest_x1/",
+ "monitor": "AUC",
+ "monitor_mode": "max",
+ "net_dropout": "0",
+ "net_regularizer": "0",
+ "num_interaction_layers": "2",
+ "num_workers": "3",
+ "optimizer": "adam",
+ "patience": "2",
+ "pickle_feature_encoder": "True",
+ "save_best_only": "True",
+ "seed": "2021",
+ "shuffle": "True",
+ "task": "binary_classification",
+ "test_data": "../data/Movielens/MovielensLatest_x1/test.csv",
+ "train_data": "../data/Movielens/MovielensLatest_x1/train.csv",
+ "use_hdf5": "True",
+ "use_residual": "True",
+ "valid_data": "../data/Movielens/MovielensLatest_x1/valid.csv",
+ "verbose": "1",
+ "version": "pytorch"
+}
+2022-04-12 16:54:38,092 P12228 INFO Set up feature encoder...
+2022-04-12 16:54:38,092 P12228 INFO Load feature_map from json: ../data/Movielens/movielenslatest_x1_cd32d937/feature_map.json
+2022-04-12 16:54:38,092 P12228 INFO Loading data...
+2022-04-12 16:54:38,095 P12228 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_cd32d937/train.h5
+2022-04-12 16:54:38,123 P12228 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_cd32d937/valid.h5
+2022-04-12 16:54:38,132 P12228 INFO Train samples: total/1404801, pos/467878, neg/936923, ratio/33.31%, blocks/1
+2022-04-12 16:54:38,133 P12228 INFO Validation samples: total/401372, pos/134225, neg/267147, ratio/33.44%, blocks/1
+2022-04-12 16:54:38,133 P12228 INFO Loading train data done.
+2022-04-12 16:54:41,830 P12228 INFO Total number of parameters: 902984.
+2022-04-12 16:54:41,830 P12228 INFO Start training: 343 batches/epoch
+2022-04-12 16:54:41,831 P12228 INFO ************ Epoch=1 start ************
+2022-04-12 16:54:51,666 P12228 INFO [Metrics] AUC: 0.930376 - logloss: 0.301244
+2022-04-12 16:54:51,667 P12228 INFO Save best model: monitor(max): 0.930376
+2022-04-12 16:54:51,673 P12228 INFO --- 343/343 batches finished ---
+2022-04-12 16:54:51,749 P12228 INFO Train loss: 0.427487
+2022-04-12 16:54:51,750 P12228 INFO ************ Epoch=1 end ************
+2022-04-12 16:55:00,419 P12228 INFO [Metrics] AUC: 0.933289 - logloss: 0.296415
+2022-04-12 16:55:00,419 P12228 INFO Save best model: monitor(max): 0.933289
+2022-04-12 16:55:00,426 P12228 INFO --- 343/343 batches finished ---
+2022-04-12 16:55:00,541 P12228 INFO Train loss: 0.334632
+2022-04-12 16:55:00,541 P12228 INFO ************ Epoch=2 end ************
+2022-04-12 16:55:08,977 P12228 INFO [Metrics] AUC: 0.937246 - logloss: 0.287503
+2022-04-12 16:55:08,977 P12228 INFO Save best model: monitor(max): 0.937246
+2022-04-12 16:55:08,985 P12228 INFO --- 343/343 batches finished ---
+2022-04-12 16:55:09,047 P12228 INFO Train loss: 0.321591
+2022-04-12 16:55:09,047 P12228 INFO ************ Epoch=3 end ************
+2022-04-12 16:55:18,340 P12228 INFO [Metrics] AUC: 0.940396 - logloss: 0.279409
+2022-04-12 16:55:18,340 P12228 INFO Save best model: monitor(max): 0.940396
+2022-04-12 16:55:18,348 P12228 INFO --- 343/343 batches finished ---
+2022-04-12 16:55:18,401 P12228 INFO Train loss: 0.314695
+2022-04-12 16:55:18,401 P12228 INFO ************ Epoch=4 end ************
+2022-04-12 16:55:27,779 P12228 INFO [Metrics] AUC: 0.942787 - logloss: 0.273496
+2022-04-12 16:55:27,780 P12228 INFO Save best model: monitor(max): 0.942787
+2022-04-12 16:55:27,787 P12228 INFO --- 343/343 batches finished ---
+2022-04-12 16:55:27,856 P12228 INFO Train loss: 0.306847
+2022-04-12 16:55:27,856 P12228 INFO ************ Epoch=5 end ************
+2022-04-12 16:55:37,908 P12228 INFO [Metrics] AUC: 0.944759 - logloss: 0.269057
+2022-04-12 16:55:37,909 P12228 INFO Save best model: monitor(max): 0.944759
+2022-04-12 16:55:37,920 P12228 INFO --- 343/343 batches finished ---
+2022-04-12 16:55:37,978 P12228 INFO Train loss: 0.299294
+2022-04-12 16:55:37,978 P12228 INFO ************ Epoch=6 end ************
+2022-04-12 16:55:47,691 P12228 INFO [Metrics] AUC: 0.946652 - logloss: 0.264696
+2022-04-12 16:55:47,692 P12228 INFO Save best model: monitor(max): 0.946652
+2022-04-12 16:55:47,700 P12228 INFO --- 343/343 batches finished ---
+2022-04-12 16:55:47,758 P12228 INFO Train loss: 0.291928
+2022-04-12 16:55:47,758 P12228 INFO ************ Epoch=7 end ************
+2022-04-12 16:55:57,385 P12228 INFO [Metrics] AUC: 0.949001 - logloss: 0.258957
+2022-04-12 16:55:57,386 P12228 INFO Save best model: monitor(max): 0.949001
+2022-04-12 16:55:57,393 P12228 INFO --- 343/343 batches finished ---
+2022-04-12 16:55:57,467 P12228 INFO Train loss: 0.284887
+2022-04-12 16:55:57,467 P12228 INFO ************ Epoch=8 end ************
+2022-04-12 16:56:06,608 P12228 INFO [Metrics] AUC: 0.951515 - logloss: 0.253123
+2022-04-12 16:56:06,609 P12228 INFO Save best model: monitor(max): 0.951515
+2022-04-12 16:56:06,617 P12228 INFO --- 343/343 batches finished ---
+2022-04-12 16:56:06,675 P12228 INFO Train loss: 0.277926
+2022-04-12 16:56:06,675 P12228 INFO ************ Epoch=9 end ************
+2022-04-12 16:56:16,250 P12228 INFO [Metrics] AUC: 0.953628 - logloss: 0.246790
+2022-04-12 16:56:16,251 P12228 INFO Save best model: monitor(max): 0.953628
+2022-04-12 16:56:16,258 P12228 INFO --- 343/343 batches finished ---
+2022-04-12 16:56:16,312 P12228 INFO Train loss: 0.271264
+2022-04-12 16:56:16,312 P12228 INFO ************ Epoch=10 end ************
+2022-04-12 16:56:25,827 P12228 INFO [Metrics] AUC: 0.955708 - logloss: 0.241655
+2022-04-12 16:56:25,828 P12228 INFO Save best model: monitor(max): 0.955708
+2022-04-12 16:56:25,835 P12228 INFO --- 343/343 batches finished ---
+2022-04-12 16:56:25,901 P12228 INFO Train loss: 0.264930
+2022-04-12 16:56:25,901 P12228 INFO ************ Epoch=11 end ************
+2022-04-12 16:56:35,385 P12228 INFO [Metrics] AUC: 0.957115 - logloss: 0.239097
+2022-04-12 16:56:35,385 P12228 INFO Save best model: monitor(max): 0.957115
+2022-04-12 16:56:35,392 P12228 INFO --- 343/343 batches finished ---
+2022-04-12 16:56:35,459 P12228 INFO Train loss: 0.258424
+2022-04-12 16:56:35,460 P12228 INFO ************ Epoch=12 end ************
+2022-04-12 16:56:44,696 P12228 INFO [Metrics] AUC: 0.958158 - logloss: 0.237206
+2022-04-12 16:56:44,697 P12228 INFO Save best model: monitor(max): 0.958158
+2022-04-12 16:56:44,704 P12228 INFO --- 343/343 batches finished ---
+2022-04-12 16:56:44,754 P12228 INFO Train loss: 0.250939
+2022-04-12 16:56:44,754 P12228 INFO ************ Epoch=13 end ************
+2022-04-12 16:56:54,347 P12228 INFO [Metrics] AUC: 0.958425 - logloss: 0.238402
+2022-04-12 16:56:54,348 P12228 INFO Save best model: monitor(max): 0.958425
+2022-04-12 16:56:54,355 P12228 INFO --- 343/343 batches finished ---
+2022-04-12 16:56:54,427 P12228 INFO Train loss: 0.242757
+2022-04-12 16:56:54,427 P12228 INFO ************ Epoch=14 end ************
+2022-04-12 16:57:04,075 P12228 INFO [Metrics] AUC: 0.958673 - logloss: 0.240255
+2022-04-12 16:57:04,076 P12228 INFO Save best model: monitor(max): 0.958673
+2022-04-12 16:57:04,083 P12228 INFO --- 343/343 batches finished ---
+2022-04-12 16:57:04,148 P12228 INFO Train loss: 0.236126
+2022-04-12 16:57:04,149 P12228 INFO ************ Epoch=15 end ************
+2022-04-12 16:57:13,373 P12228 INFO [Metrics] AUC: 0.958731 - logloss: 0.243396
+2022-04-12 16:57:13,374 P12228 INFO Save best model: monitor(max): 0.958731
+2022-04-12 16:57:13,380 P12228 INFO --- 343/343 batches finished ---
+2022-04-12 16:57:13,433 P12228 INFO Train loss: 0.229093
+2022-04-12 16:57:13,433 P12228 INFO ************ Epoch=16 end ************
+2022-04-12 16:57:22,435 P12228 INFO [Metrics] AUC: 0.958769 - logloss: 0.247040
+2022-04-12 16:57:22,436 P12228 INFO Save best model: monitor(max): 0.958769
+2022-04-12 16:57:22,443 P12228 INFO --- 343/343 batches finished ---
+2022-04-12 16:57:22,501 P12228 INFO Train loss: 0.223994
+2022-04-12 16:57:22,501 P12228 INFO ************ Epoch=17 end ************
+2022-04-12 16:57:31,730 P12228 INFO [Metrics] AUC: 0.958379 - logloss: 0.250961
+2022-04-12 16:57:31,731 P12228 INFO Monitor(max) STOP: 0.958379 !
+2022-04-12 16:57:31,731 P12228 INFO Reduce learning rate on plateau: 0.000100
+2022-04-12 16:57:31,731 P12228 INFO --- 343/343 batches finished ---
+2022-04-12 16:57:31,796 P12228 INFO Train loss: 0.218576
+2022-04-12 16:57:31,796 P12228 INFO ************ Epoch=18 end ************
+2022-04-12 16:57:41,114 P12228 INFO [Metrics] AUC: 0.961894 - logloss: 0.250288
+2022-04-12 16:57:41,115 P12228 INFO Save best model: monitor(max): 0.961894
+2022-04-12 16:57:41,122 P12228 INFO --- 343/343 batches finished ---
+2022-04-12 16:57:41,184 P12228 INFO Train loss: 0.172328
+2022-04-12 16:57:41,184 P12228 INFO ************ Epoch=19 end ************
+2022-04-12 16:57:50,463 P12228 INFO [Metrics] AUC: 0.962941 - logloss: 0.257173
+2022-04-12 16:57:50,464 P12228 INFO Save best model: monitor(max): 0.962941
+2022-04-12 16:57:50,471 P12228 INFO --- 343/343 batches finished ---
+2022-04-12 16:57:50,545 P12228 INFO Train loss: 0.141895
+2022-04-12 16:57:50,546 P12228 INFO ************ Epoch=20 end ************
+2022-04-12 16:57:59,113 P12228 INFO [Metrics] AUC: 0.963267 - logloss: 0.266475
+2022-04-12 16:57:59,113 P12228 INFO Save best model: monitor(max): 0.963267
+2022-04-12 16:57:59,120 P12228 INFO --- 343/343 batches finished ---
+2022-04-12 16:57:59,182 P12228 INFO Train loss: 0.123147
+2022-04-12 16:57:59,182 P12228 INFO ************ Epoch=21 end ************
+2022-04-12 16:58:07,262 P12228 INFO [Metrics] AUC: 0.963191 - logloss: 0.276652
+2022-04-12 16:58:07,263 P12228 INFO Monitor(max) STOP: 0.963191 !
+2022-04-12 16:58:07,263 P12228 INFO Reduce learning rate on plateau: 0.000010
+2022-04-12 16:58:07,263 P12228 INFO --- 343/343 batches finished ---
+2022-04-12 16:58:07,315 P12228 INFO Train loss: 0.109783
+2022-04-12 16:58:07,316 P12228 INFO ************ Epoch=22 end ************
+2022-04-12 16:58:15,280 P12228 INFO [Metrics] AUC: 0.963187 - logloss: 0.277999
+2022-04-12 16:58:15,280 P12228 INFO Monitor(max) STOP: 0.963187 !
+2022-04-12 16:58:15,281 P12228 INFO Reduce learning rate on plateau: 0.000001
+2022-04-12 16:58:15,281 P12228 INFO Early stopping at epoch=23
+2022-04-12 16:58:15,281 P12228 INFO --- 343/343 batches finished ---
+2022-04-12 16:58:15,334 P12228 INFO Train loss: 0.098557
+2022-04-12 16:58:15,334 P12228 INFO Training finished.
+2022-04-12 16:58:15,334 P12228 INFO Load best model: /home/XXX/FuxiCTR/benchmarks/Movielens/SAM_movielenslatest_x1/movielenslatest_x1_cd32d937/SAM_movielenslatest_x1_013_68a6bc8b.model
+2022-04-12 16:58:18,900 P12228 INFO ****** Validation evaluation ******
+2022-04-12 16:58:21,366 P12228 INFO [Metrics] AUC: 0.963267 - logloss: 0.266475
+2022-04-12 16:58:21,444 P12228 INFO ******** Test evaluation ********
+2022-04-12 16:58:21,445 P12228 INFO Loading data...
+2022-04-12 16:58:21,445 P12228 INFO Loading data from h5: ../data/Movielens/movielenslatest_x1_cd32d937/test.h5
+2022-04-12 16:58:21,451 P12228 INFO Test samples: total/200686, pos/66850, neg/133836, ratio/33.31%, blocks/1
+2022-04-12 16:58:21,452 P12228 INFO Loading test data done.
+2022-04-12 16:58:22,400 P12228 INFO [Metrics] AUC: 0.963104 - logloss: 0.266696
+
+```
diff --git a/ranking/ctr/WideDeep/WideDeep_avazu_x1/README.md b/ranking/ctr/WideDeep/WideDeep_avazu_x1/README.md
index 6ae8bafd..94df7f8c 100644
--- a/ranking/ctr/WideDeep/WideDeep_avazu_x1/README.md
+++ b/ranking/ctr/WideDeep/WideDeep_avazu_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the WideDeep model on the Avazu_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x1](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [WideDeep](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/WideDeep.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [WideDeep](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/WideDeep.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/WideDeep/WideDeep_avazu_x4_001/README.md b/ranking/ctr/WideDeep/WideDeep_avazu_x4_001/README.md
index 74ef24a9..2ab2bfd4 100644
--- a/ranking/ctr/WideDeep/WideDeep_avazu_x4_001/README.md
+++ b/ranking/ctr/WideDeep/WideDeep_avazu_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the WideDeep model on the Avazu_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [WideDeep](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/WideDeep.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [WideDeep](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/WideDeep.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/WideDeep/WideDeep_avazu_x4_002/README.md b/ranking/ctr/WideDeep/WideDeep_avazu_x4_002/README.md
index 5a849d0f..bf8505c6 100644
--- a/ranking/ctr/WideDeep/WideDeep_avazu_x4_002/README.md
+++ b/ranking/ctr/WideDeep/WideDeep_avazu_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the WideDeep model on the Avazu_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [WideDeep](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/WideDeep.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [WideDeep](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/WideDeep.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/WideDeep/WideDeep_criteo_x1/README.md b/ranking/ctr/WideDeep/WideDeep_criteo_x1/README.md
index 2e9fa250..8cf76093 100644
--- a/ranking/ctr/WideDeep/WideDeep_criteo_x1/README.md
+++ b/ranking/ctr/WideDeep/WideDeep_criteo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the WideDeep model on the Criteo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x1](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [WideDeep](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/WideDeep.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [WideDeep](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/WideDeep.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/WideDeep/WideDeep_criteo_x4_001/README.md b/ranking/ctr/WideDeep/WideDeep_criteo_x4_001/README.md
index 000f3990..5f41a32d 100644
--- a/ranking/ctr/WideDeep/WideDeep_criteo_x4_001/README.md
+++ b/ranking/ctr/WideDeep/WideDeep_criteo_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the WideDeep model on the Criteo_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [WideDeep](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/WideDeep.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [WideDeep](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/WideDeep.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/WideDeep/WideDeep_criteo_x4_002/README.md b/ranking/ctr/WideDeep/WideDeep_criteo_x4_002/README.md
index e146abaf..610f5574 100644
--- a/ranking/ctr/WideDeep/WideDeep_criteo_x4_002/README.md
+++ b/ranking/ctr/WideDeep/WideDeep_criteo_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the WideDeep model on the Criteo_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [WideDeep](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/WideDeep.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [WideDeep](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/WideDeep.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/WideDeep/WideDeep_frappe_x1/README.md b/ranking/ctr/WideDeep/WideDeep_frappe_x1/README.md
index 317a7e70..9002101b 100644
--- a/ranking/ctr/WideDeep/WideDeep_frappe_x1/README.md
+++ b/ranking/ctr/WideDeep/WideDeep_frappe_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the WideDeep model on the Frappe_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Frappe_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Frappe/README.md#Frappe_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Frappe_x1](https://github.com/reczoo/Datasets/tree/main/Frappe/Frappe_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [WideDeep](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/WideDeep.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [WideDeep](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/WideDeep.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/WideDeep/WideDeep_kkbox_x1/README.md b/ranking/ctr/WideDeep/WideDeep_kkbox_x1/README.md
index fb360ccb..3cd94615 100644
--- a/ranking/ctr/WideDeep/WideDeep_kkbox_x1/README.md
+++ b/ranking/ctr/WideDeep/WideDeep_kkbox_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the WideDeep model on the KKBox_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -38,11 +38,11 @@ Dataset ID: [KKBox_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pre
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [WideDeep](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/WideDeep.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [WideDeep](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/WideDeep.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/WideDeep/WideDeep_movielenslatest_x1/README.md b/ranking/ctr/WideDeep/WideDeep_movielenslatest_x1/README.md
index 9971d830..5bee8918 100644
--- a/ranking/ctr/WideDeep/WideDeep_movielenslatest_x1/README.md
+++ b/ranking/ctr/WideDeep/WideDeep_movielenslatest_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the WideDeep model on the Movielenslatest_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Movielenslatest_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/MovieLens/README.md#Movielenslatest_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [MovielensLatest_x1](https://github.com/reczoo/Datasets/tree/main/MovieLens/MovielensLatest_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [WideDeep](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/WideDeep.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [WideDeep](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/WideDeep.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/xDeepFM/CIN_avazu_x1/README.md b/ranking/ctr/xDeepFM/CIN_avazu_x1/README.md
index 805024cd..7baf1f5f 100644
--- a/ranking/ctr/xDeepFM/CIN_avazu_x1/README.md
+++ b/ranking/ctr/xDeepFM/CIN_avazu_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the xDeepFM model on the Avazu_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x1](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [xDeepFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/xDeepFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [xDeepFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/xDeepFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/xDeepFM/CIN_avazu_x4_001/README.md b/ranking/ctr/xDeepFM/CIN_avazu_x4_001/README.md
index 25091ce6..a7756180 100644
--- a/ranking/ctr/xDeepFM/CIN_avazu_x4_001/README.md
+++ b/ranking/ctr/xDeepFM/CIN_avazu_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the xDeepFM model on the Avazu_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -38,11 +38,11 @@ Dataset ID: [Avazu_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [xDeepFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/xDeepFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [xDeepFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/xDeepFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/xDeepFM/CIN_avazu_x4_002/README.md b/ranking/ctr/xDeepFM/CIN_avazu_x4_002/README.md
index b7250ab5..52a01668 100644
--- a/ranking/ctr/xDeepFM/CIN_avazu_x4_002/README.md
+++ b/ranking/ctr/xDeepFM/CIN_avazu_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the xDeepFM model on the Avazu_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -38,11 +38,11 @@ Dataset ID: [Avazu_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [xDeepFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/xDeepFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [xDeepFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/xDeepFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/xDeepFM/CIN_criteo_x1/README.md b/ranking/ctr/xDeepFM/CIN_criteo_x1/README.md
index 22934100..a05a029e 100644
--- a/ranking/ctr/xDeepFM/CIN_criteo_x1/README.md
+++ b/ranking/ctr/xDeepFM/CIN_criteo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the xDeepFM model on the Criteo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x1](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [xDeepFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/xDeepFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [xDeepFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/xDeepFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/xDeepFM/CIN_criteo_x4_001/README.md b/ranking/ctr/xDeepFM/CIN_criteo_x4_001/README.md
index bfdca06a..428b6f45 100644
--- a/ranking/ctr/xDeepFM/CIN_criteo_x4_001/README.md
+++ b/ranking/ctr/xDeepFM/CIN_criteo_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the xDeepFM model on the Criteo_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [xDeepFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/xDeepFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [xDeepFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/xDeepFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/xDeepFM/CIN_criteo_x4_002/README.md b/ranking/ctr/xDeepFM/CIN_criteo_x4_002/README.md
index 30b07ad8..507ed7ba 100644
--- a/ranking/ctr/xDeepFM/CIN_criteo_x4_002/README.md
+++ b/ranking/ctr/xDeepFM/CIN_criteo_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the xDeepFM model on the Criteo_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [xDeepFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/xDeepFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [xDeepFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/xDeepFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/xDeepFM/CIN_frappe_x1/README.md b/ranking/ctr/xDeepFM/CIN_frappe_x1/README.md
index 61b1f706..f8bfee25 100644
--- a/ranking/ctr/xDeepFM/CIN_frappe_x1/README.md
+++ b/ranking/ctr/xDeepFM/CIN_frappe_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the xDeepFM model on the Frappe_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Frappe_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Frappe/README.md#Frappe_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Frappe_x1](https://github.com/reczoo/Datasets/tree/main/Frappe/Frappe_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [xDeepFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/xDeepFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [xDeepFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/xDeepFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/xDeepFM/CIN_kkbox_x1/README.md b/ranking/ctr/xDeepFM/CIN_kkbox_x1/README.md
index a82ec88a..9871e84f 100644
--- a/ranking/ctr/xDeepFM/CIN_kkbox_x1/README.md
+++ b/ranking/ctr/xDeepFM/CIN_kkbox_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the xDeepFM model on the KKBox_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -38,11 +38,11 @@ Dataset ID: [KKBox_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pre
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [xDeepFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/xDeepFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [xDeepFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/xDeepFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/xDeepFM/CIN_movielenslatest_x1/README.md b/ranking/ctr/xDeepFM/CIN_movielenslatest_x1/README.md
index d66ee6f1..1d3bda82 100644
--- a/ranking/ctr/xDeepFM/CIN_movielenslatest_x1/README.md
+++ b/ranking/ctr/xDeepFM/CIN_movielenslatest_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the xDeepFM model on the Movielenslatest_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Movielenslatest_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/MovieLens/README.md#Movielenslatest_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [MovielensLatest_x1](https://github.com/reczoo/Datasets/tree/main/MovieLens/MovielensLatest_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [xDeepFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/xDeepFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [xDeepFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/xDeepFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/xDeepFM/xDeepFM_amazonelectronics_x1/README.md b/ranking/ctr/xDeepFM/xDeepFM_amazonelectronics_x1/README.md
index f52efded..5b644926 100644
--- a/ranking/ctr/xDeepFM/xDeepFM_amazonelectronics_x1/README.md
+++ b/ranking/ctr/xDeepFM/xDeepFM_amazonelectronics_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the xDeepFM model on the AmazonElectronics_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [AmazonElectronics_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/Amazon#AmazonElectronics_x1) to get data ready.
+Please refer to [AmazonElectronics_x1](https://github.com/reczoo/Datasets/tree/main/Amazon/AmazonElectronics_x1) to get the dataset details.
### Code
-We use the [xDeepFM](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/xDeepFM) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [xDeepFM](https://github.com/reczoo/FuxiCTR/tree/v2.0.1/model_zoo/xDeepFM) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/xDeepFM/xDeepFM_avazu_x1/README.md b/ranking/ctr/xDeepFM/xDeepFM_avazu_x1/README.md
index 9615180b..767e3b20 100644
--- a/ranking/ctr/xDeepFM/xDeepFM_avazu_x1/README.md
+++ b/ranking/ctr/xDeepFM/xDeepFM_avazu_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the xDeepFM model on the Avazu_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x1](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [xDeepFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/xDeepFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [xDeepFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/xDeepFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/xDeepFM/xDeepFM_avazu_x4_001/README.md b/ranking/ctr/xDeepFM/xDeepFM_avazu_x4_001/README.md
index 53df8fc3..512ff0aa 100644
--- a/ranking/ctr/xDeepFM/xDeepFM_avazu_x4_001/README.md
+++ b/ranking/ctr/xDeepFM/xDeepFM_avazu_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the xDeepFM model on the Avazu_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [xDeepFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/xDeepFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [xDeepFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/xDeepFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/xDeepFM/xDeepFM_avazu_x4_002/README.md b/ranking/ctr/xDeepFM/xDeepFM_avazu_x4_002/README.md
index be760510..876b01bc 100644
--- a/ranking/ctr/xDeepFM/xDeepFM_avazu_x4_002/README.md
+++ b/ranking/ctr/xDeepFM/xDeepFM_avazu_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the xDeepFM model on the Avazu_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Avazu_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Avazu/README.md#Avazu_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Avazu_x4](https://github.com/reczoo/Datasets/tree/main/Avazu/Avazu_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [xDeepFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/xDeepFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [xDeepFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/xDeepFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/xDeepFM/xDeepFM_criteo_x1/README.md b/ranking/ctr/xDeepFM/xDeepFM_criteo_x1/README.md
index b8153f85..04ad0f38 100644
--- a/ranking/ctr/xDeepFM/xDeepFM_criteo_x1/README.md
+++ b/ranking/ctr/xDeepFM/xDeepFM_criteo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the xDeepFM model on the Criteo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x1](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [xDeepFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/xDeepFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [xDeepFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/xDeepFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/xDeepFM/xDeepFM_criteo_x4_001/README.md b/ranking/ctr/xDeepFM/xDeepFM_criteo_x4_001/README.md
index 5c146444..2771bc64 100644
--- a/ranking/ctr/xDeepFM/xDeepFM_criteo_x4_001/README.md
+++ b/ranking/ctr/xDeepFM/xDeepFM_criteo_x4_001/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the xDeepFM model on the Criteo_x4_001 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_001](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_001). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [xDeepFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/xDeepFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [xDeepFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/xDeepFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/xDeepFM/xDeepFM_criteo_x4_002/README.md b/ranking/ctr/xDeepFM/xDeepFM_criteo_x4_002/README.md
index 68af52a3..8d1e3728 100644
--- a/ranking/ctr/xDeepFM/xDeepFM_criteo_x4_002/README.md
+++ b/ranking/ctr/xDeepFM/xDeepFM_criteo_x4_002/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the xDeepFM model on the Criteo_x4_002 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Criteo_x4_002](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Criteo/README.md#Criteo_x4_002). Please refer to the dataset details to get data ready.
+Dataset ID: [Criteo_x4](https://github.com/reczoo/Datasets/tree/main/Criteo/Criteo_x4). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [xDeepFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/xDeepFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [xDeepFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/xDeepFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/xDeepFM/xDeepFM_frappe_x1/README.md b/ranking/ctr/xDeepFM/xDeepFM_frappe_x1/README.md
index f63569ff..174eaf5a 100644
--- a/ranking/ctr/xDeepFM/xDeepFM_frappe_x1/README.md
+++ b/ranking/ctr/xDeepFM/xDeepFM_frappe_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the xDeepFM model on the Frappe_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Frappe_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/Frappe/README.md#Frappe_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [Frappe_x1](https://github.com/reczoo/Datasets/tree/main/Frappe/Frappe_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [xDeepFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/xDeepFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [xDeepFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/xDeepFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/xDeepFM/xDeepFM_kkbox_x1/README.md b/ranking/ctr/xDeepFM/xDeepFM_kkbox_x1/README.md
index 443ce6c4..9e2dd369 100644
--- a/ranking/ctr/xDeepFM/xDeepFM_kkbox_x1/README.md
+++ b/ranking/ctr/xDeepFM/xDeepFM_kkbox_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the xDeepFM model on the KKBox_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -38,11 +38,11 @@ Dataset ID: [KKBox_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_pre
### Code
-We use [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [xDeepFM](https://github.com/xue-pai/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/xDeepFM.py).
+We use [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/tree/v1.0.2) for this experiment. See the model code: [xDeepFM](https://github.com/reczoo/FuxiCTR/blob/v1.0.2/fuxictr/pytorch/models/xDeepFM.py).
Running steps:
-1. Download [FuxiCTR-v1.0.2](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.0.2](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.0.2.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/xDeepFM/xDeepFM_kuaivideo_x1/README.md b/ranking/ctr/xDeepFM/xDeepFM_kuaivideo_x1/README.md
index 2b46e5fc..1ecbd82b 100644
--- a/ranking/ctr/xDeepFM/xDeepFM_kuaivideo_x1/README.md
+++ b/ranking/ctr/xDeepFM/xDeepFM_kuaivideo_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the xDeepFM model on the KuaiVideo_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [KuaiVideo_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/KuaiShou#KuaiVideo_x1) to get data ready.
+Please refer to [KuaiVideo_x1](https://github.com/reczoo/Datasets/tree/main/KuaiShou/KuaiVideo_x1) to get the dataset details.
### Code
-We use the [xDeepFM](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/xDeepFM) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [xDeepFM](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/xDeepFM) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/xDeepFM/xDeepFM_microvideo1.7m_x1/README.md b/ranking/ctr/xDeepFM/xDeepFM_microvideo1.7m_x1/README.md
index df0b6eef..7248e9f4 100644
--- a/ranking/ctr/xDeepFM/xDeepFM_microvideo1.7m_x1/README.md
+++ b/ranking/ctr/xDeepFM/xDeepFM_microvideo1.7m_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the xDeepFM model on the MicroVideo1.7M_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [MicroVideo1.7M_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/MicroVideo1.7M#MicroVideo17M_x1) to get data ready.
+Please refer to [MicroVideo1.7M_x1](https://github.com/reczoo/Datasets/tree/main/MicroVideo/MicroVideo1.7M_x1) to get the dataset details.
### Code
-We use the [xDeepFM](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/xDeepFM) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [xDeepFM](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/xDeepFM) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/ranking/ctr/xDeepFM/xDeepFM_movielenslatest_x1/README.md b/ranking/ctr/xDeepFM/xDeepFM_movielenslatest_x1/README.md
index 2d4775a2..b6c52c31 100644
--- a/ranking/ctr/xDeepFM/xDeepFM_movielenslatest_x1/README.md
+++ b/ranking/ctr/xDeepFM/xDeepFM_movielenslatest_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the xDeepFM model on the Movielenslatest_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
### Index
[Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs)
@@ -34,15 +34,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Dataset ID: [Movielenslatest_x1](https://github.com/openbenchmark/BARS/blob/master/ctr_prediction/datasets/MovieLens/README.md#Movielenslatest_x1). Please refer to the dataset details to get data ready.
+Dataset ID: [MovielensLatest_x1](https://github.com/reczoo/Datasets/tree/main/MovieLens/MovielensLatest_x1). Please refer to the dataset details to get data ready.
### Code
-We use [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [xDeepFM](https://github.com/xue-pai/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/xDeepFM.py).
+We use [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/tree/v1.1.0) for this experiment. See the model code: [xDeepFM](https://github.com/reczoo/FuxiCTR/blob/v1.1.0/fuxictr/pytorch/models/xDeepFM.py).
Running steps:
-1. Download [FuxiCTR-v1.1.0](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
+1. Download [FuxiCTR-v1.1.0](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v1.1.0.zip) and install all the dependencies listed in the [environments](#environments). Then modify [run_expid.py](./run_expid.py#L5) to add the FuxiCTR library to system path
```python
sys.path.append('YOUR_PATH_TO_FuxiCTR/')
diff --git a/ranking/ctr/xDeepFM/xDeepFM_taobaoad_x1/README.md b/ranking/ctr/xDeepFM/xDeepFM_taobaoad_x1/README.md
index 1a33a3d0..5bed129c 100644
--- a/ranking/ctr/xDeepFM/xDeepFM_taobaoad_x1/README.md
+++ b/ranking/ctr/xDeepFM/xDeepFM_taobaoad_x1/README.md
@@ -2,7 +2,7 @@
A hands-on guide to run the xDeepFM model on the TaobaoAd_x1 dataset.
-Author: [XUEPAI](https://github.com/xue-pai)
+Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)
| [Environments](#Environments) | [Dataset](#Dataset) | [Code](#Code) | [Results](#Results) | [Logs](#Logs) |
@@ -35,15 +35,15 @@ Author: [XUEPAI](https://github.com/xue-pai)
```
### Dataset
-Please refer to the BARS dataset [TaobaoAd_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/Taobao#TaobaoAd_x1) to get data ready.
+Please refer to [TaobaoAd_x1](https://github.com/reczoo/Datasets/tree/main/Taobao/TaobaoAd_x1) to get the dataset details.
### Code
-We use the [xDeepFM](https://github.com/xue-pai/FuxiCTR/blob/v2.0.1/model_zoo/xDeepFM) model code from [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/tree/v2.0.1) for this experiment.
+We use the [xDeepFM](https://github.com/reczoo/FuxiCTR/blob/v2.0.1/model_zoo/xDeepFM) model code from [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/tree/v2.0.1) for this experiment.
Running steps:
-1. Download [FuxiCTR-v2.0.1](https://github.com/xue-pai/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
+1. Download [FuxiCTR-v2.0.1](https://github.com/reczoo/FuxiCTR/archive/refs/tags/v2.0.1.zip) and install all the dependencies listed in the [environments](#environments).
```bash
pip uninstall fuxictr
diff --git a/scripts/gen_readme_md.py b/scripts/gen_readme_md.py
index c0326178..2990d9d1 100644
--- a/scripts/gen_readme_md.py
+++ b/scripts/gen_readme_md.py
@@ -4,7 +4,7 @@
import pandas as pd
-input_dir = "../ranking/ctr/PPNet/PPNet_taobao_x1"
+input_dir = "../ranking/ctr/FinalNet/FinalNet_criteo_x4_001"
model_id = os.path.basename(input_dir)
model_name = input_dir.split("ctr/")[1].split("/")[0]
@@ -28,14 +28,22 @@
# dataset_id = "MicroVideo1.7M_x1"
# short_dataset_id = "MicroVideo1.7M_x1"
-dataset_name = "Taobao"
-dataset_id = "TaobaoAd_x1"
-short_dataset_id = "TaobaoAd_x1"
+# dataset_name = "Taobao"
+# dataset_id = "TaobaoAd_x1"
+# short_dataset_id = "TaobaoAd_x1"
# dataset_name = "Frappe"
# dataset_id = "Frappe_x1"
# short_dataset_id = "Frappe_x1"
+# dataset_name = "Avazu"
+# dataset_id = "Avazu_x4"
+# short_dataset_id = "Avazu_x4"
+
+dataset_name = "Criteo"
+dataset_id = "Criteo_x4"
+short_dataset_id = "Criteo_x4"
+
dataset_url = f"https://github.com/reczoo/Datasets/tree/main/{dataset_name}/{short_dataset_id}"
print(dataset_url)
diff --git a/scripts/revise_readme_md.py b/scripts/revise_readme_md.py
new file mode 100644
index 00000000..968d5a81
--- /dev/null
+++ b/scripts/revise_readme_md.py
@@ -0,0 +1,21 @@
+import regex
+import glob
+
+data_path = "../ranking/**/*.md"
+
+file_list = glob.glob(data_path, recursive=True)
+for f in file_list:
+ if "taobaoad_x1" in f:
+ print(f)
+ with open(f, "r") as fd:
+ res = fd.read()
+ phrase = "Author: [XUEPAI](https://github.com/xue-pai)"
+ correct = "Author: [BARS Benchmark](https://github.com/reczoo/BARS/blob/main/CITATION)"
+ res = res.replace(phrase, correct)
+ phrase = "Please refer to the BARS dataset [TaobaoAd_x1](https://github.com/openbenchmark/BARS/blob/main/datasets/Taobao#TaobaoAd_x1) to get data ready."
+ correct = "Please refer to [TaobaoAd_x1](https://github.com/reczoo/Datasets/tree/main/Taobao/TaobaoAd_x1) to get the dataset details."
+ res = res.replace(phrase, correct)
+ res = res.replace("xue-pai", "reczoo")
+ # print(res[0:200])
+ with open(f, "w") as fd:
+ fd.write(res)