@inproceedings{liu2025multioffRL,
  author    = {Liu, Langming and Wang, Wanyu and Zhang, Chi and Li, Bo and Yin, Hongzhi and Wei, Xuetao and Su, Wenbo and Zheng, Bo and Zhao, Xiangyu},
  title     = {Multi-task Offline Reinforcement Learning for Online Advertising in Recommender Systems},
  booktitle = {Proceedings of the 31st ACM SIGKDD Conference on Knowledge Discovery and Data Mining V.2},
  series    = {KDD '25},
  year      = {2025},
  isbn      = {9798400714542},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  location  = {Toronto ON, Canada},
  pages     = {4635--4646},
  numpages  = {12},
  url       = {https://doi.org/10.1145/3711896.3737250},
  doi       = {10.1145/3711896.3737250},
  keywords  = {advertising, multi-task learning, offline reinforcement learning},
  abstract  = {Online advertising in recommendation platforms has gained significant attention, with a predominant focus on channel recommendation and budget allocation strategies. However, current offline reinforcement learning (RL) methods face substantial challenges when applied to sparse advertising scenarios, primarily due to severe overestimation, distributional shifts, and overlooking budget constraints. To address these issues, we propose MTORL, a novel multi-task offline RL model that targets two key objectives. First, we establish a Markov Decision Process (MDP) framework specific to the nuances of advertising. Then, we develop a causal state encoder to capture dynamic user interests and temporal dependencies, facilitating offline RL through conditional sequence modeling. Causal attention mechanisms are introduced to enhance user sequence representations by identifying correlations among causal states. We employ multi-task learning to decode actions and rewards, simultaneously addressing channel recommendation and budget allocation. Notably, our framework includes an automated system for integrating these tasks into online advertising. Extensive experiments on offline and online environments demonstrate MTORL's superiority over state-of-the-art methods. The code is available online at https://github.com/Applied-Machine-Learning-Lab/MTORL.}
}