Q-learning in a game not working as expected

I've attempted to apply Q-learning to a simple game I've written. The game is based around the player having to "jump" to avoid oncoming boxes.
I have designed the system with two actions, "jump" and "do_nothing", and the state is the distance to the next block (scaled down and floored, to make sure there isn't a massive number of states).
My issue seems to be that my implementation of the algorithm isn't considering "future reward", so it ends up jumping at the wrong times.
Here is my implementation of the Q-learning algorithm:
JumpGameAIClass.prototype.getQ = function getQ(state) {
    if (!this.Q.hasOwnProperty(state)) {
        this.Q[state] = {};

        for (var actionIndex = 0; actionIndex < this.actions.length; actionIndex++) {
            var action = this.actions[actionIndex];

            this.Q[state][action] = 0;
        }
    }

    return this.Q[state];
};

JumpGameAIClass.prototype.getBlockDistance = function getBlockDistance() {
    var closest = -1;

    for (var blockIndex = 0; blockIndex < this.blocks.length; blockIndex++) {
        var block = this.blocks[blockIndex];

        var distance = block.x - this.playerX;

        if (distance >= 0 && (closest === -1 || distance < closest)) {
            closest = distance;
        }
    }

    return Math.max(0, Math.floor(closest * this.resolution));
};

JumpGameAIClass.prototype.getActionWithHighestQ = function getActionWithHighestQ(distance) {
    var jumpReward = this.getQ(distance)[this.actions[0]];
    var doNothingReward = this.getQ(distance)[this.actions[1]];

    if (jumpReward > doNothingReward) {
        return this.actions[0];
    } else if (doNothingReward > jumpReward) {
        return this.actions[1];
    } else {
        if (!this.canJump()) {
            return this.actions[1];
        }

        return this.actions[Math.floor(Math.random() * this.actions.length)];
    }
};

JumpGameAIClass.prototype.getActionEpsilonGreedy = function getActionEpsilonGreedy() {
    // We can't jump while in mid-air
    if (!this.canJump()) {
        return this.actions[1];
    }

    if (Math.random() < this.epsilon) {
        return this.actions[Math.floor(Math.random() * this.actions.length)];
    } else {
        return this.getActionWithHighestQ(this.getBlockDistance());
    }
};

JumpGameAIClass.prototype.think = function think() {
    var reward = this.liveReward;

    if (this.score !== this.lastScore) {
        this.lastScore = this.score;
        reward = this.scoreReward;
    } else if (!this.playerAlive) {
        reward = this.deathReward;
    }

    this.drawDistance();

    var distance = this.getBlockDistance(),
        maxQ = this.getQ(distance)[this.getActionWithHighestQ(distance)],
        previousQ = this.getQ(this.lastDistance)[this.lastAction];

    this.getQ(this.lastDistance)[this.lastAction] = previousQ + this.alpha * (reward + (this.gamma * maxQ) - previousQ);

    this.lastAction = this.getActionEpsilonGreedy();
    this.lastDistance = distance;

    switch (this.lastAction) {
        case this.actions[0]:
            this.jump();
            break;
    }
};

And here are some of the properties it uses:

epsilon: 0.05,
alpha: 1,
gamma: 1,
resolution: 0.1,
actions: [ 'jump', 'do_nothing' ],
Q: {},
liveReward: 0,
scoreReward: 100,
deathReward: -1000,
lastAction: 'do_nothing',
lastDistance: 0,
lastScore: 0

I have to use lastAction / lastDistance to calculate the Q value, as I can't use the current data (that would be operating on the action performed in the frame before).
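
For reference, a minimal standalone sketch of that ordering (the names below are illustrative, not part of the game code): the reward seen on the current frame is credited to the state-action pair chosen on the previous frame, and the current state only contributes the bootstrap (max Q) term.

function updateQ(Q, lastState, lastAction, reward, currentState, alpha, gamma) {
    // Value of the best action available from the state we have just observed.
    var maxQ = Math.max(Q[currentState]['jump'], Q[currentState]['do_nothing']);

    // The reward observed this frame is the consequence of last frame's action,
    // so the update applies to (lastState, lastAction), not the current pair.
    Q[lastState][lastAction] += alpha * (reward + gamma * maxQ - Q[lastState][lastAction]);
}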

The think method is called once per frame, after all the rendering and game logic (physics, controls, death, etc.) has completed.

var JumpGameAIClass = function JumpGame(canvas) {
    Game.JumpGame.call(this, canvas);

    Object.defineProperties(this, {
        epsilon: {
            value: 0.05
        },

        alpha: {
            value: 1
        },

        gamma: {
            value: 1
        },

        resolution: {
            value: 0.1
        },

        actions: {
            value: [ 'jump', 'do_nothing' ]
        },

        Q: {
            value: { },
            writable: true
        },

        liveReward: {
            value: 0
        },

        scoreReward: {
            value: 100
        },

        deathReward: {
            value: -1000
        },

        lastAction: {
            value: 'do_nothing',
            writable: true
        },

        lastDistance: {
            value: 0,
            writable: true
        },

        lastScore: {
            value: 0,
            writable: true
        }
    });
};

JumpGameAIClass.prototype = Object.create(Game.JumpGame.prototype);

JumpGameAIClass.prototype.getQ = function getQ(state) {
    if (!this.Q.hasOwnProperty(state)) {
        this.Q[state] = {};

        for (var actionIndex = 0; actionIndex < this.actions.length; actionIndex++) {
            var action = this.actions[actionIndex];

            this.Q[state][action] = 0;
        }
    }

    return this.Q[state];
};

JumpGameAIClass.prototype.getBlockDistance = function getBlockDistance() {
    var closest = -1;

    for (var blockIndex = 0; blockIndex < this.blocks.length; blockIndex++) {
        var block = this.blocks[blockIndex];

        var distance = block.x - this.playerX;

        if (distance >= 0 && (closest === -1 || distance < closest)) {
            closest = distance;
        }
    }

    return Math.max(0, Math.floor(closest * this.resolution));
};

JumpGameAIClass.prototype.getActionWithHighestQ = function getActionWithHighestQ(distance) {
    var jumpReward = this.getQ(distance)[this.actions[0]];
    var doNothingReward = this.getQ(distance)[this.actions[1]];

    if (jumpReward > doNothingReward) {
        return this.actions[0];
    } else if (doNothingReward > jumpReward) {
        return this.actions[1];
    } else {
        if (!this.canJump()) {
            return this.actions[1];
        }

        return this.actions[Math.floor(Math.random() * this.actions.length)];
    }
};

JumpGameAIClass.prototype.getActionEpsilonGreedy = function getActionEpsilonGreedy() {
    if (!this.canJump()) {
        return this.actions[1];
    }

    if (Math.random() < this.epsilon) {
        return this.actions[Math.floor(Math.random() * this.actions.length)];
    } else {
        return this.getActionWithHighestQ(this.getBlockDistance());
    }
};

JumpGameAIClass.prototype.onDeath = function onDeath() {
    this.restart();
};

JumpGameAIClass.prototype.think = function think() {
    var reward = this.liveReward;

    if (this.score !== this.lastScore) {
        this.lastScore = this.score;
        reward = this.scoreReward;
    } else if (!this.playerAlive) {
        reward = this.deathReward;
    }

    this.drawDistance();

    var distance = this.getBlockDistance(),
        maxQ = this.getQ(distance)[this.getActionWithHighestQ(distance)],
        previousQ = this.getQ(this.lastDistance)[this.lastAction];

    this.getQ(this.lastDistance)[this.lastAction] = previousQ + this.alpha * (reward + (this.gamma * maxQ) - previousQ);

    this.lastAction = this.getActionEpsilonGreedy();
    this.lastDistance = distance;

    switch (this.lastAction) {
        case this.actions[0]:
            this.jump();
            break;
    }
};

JumpGameAIClass.prototype.drawDistance = function drawDistance() {
    this.context.save();

    this.context.textAlign = 'center';
    this.context.textBaseline = 'bottom';

    this.context.fillText('Distance: ' + this.getBlockDistance(), this.canvasWidth / 2, this.canvasHeight / 4);

    this.context.textBaseline = 'top';

    this.context.fillText('Last Distance: ' + this.lastDistance, this.canvasWidth / 2, this.canvasHeight / 4);

    this.context.restore();
};

JumpGameAIClass.prototype.onFrame = function onFrame() {
    Game.JumpGame.prototype.onFrame.apply(this, arguments);

    this.think();
}

Game.JumpGameAI = JumpGameAIClass;
body {
    background-color: #EEEEEE;
    text-align: center;
}

canvas#game {
    background-color: #FFFFFF;
    border: 1px solid #DDDDDD;
}
<!DOCTYPE HTML>
<html lang="en">
<head>
    <title>jump</title>
</head>
<body>
    <canvas id="game" width="512" height="512">
        <h1>Your browser doesn't support canvas!</h1>
    </canvas>
  
    <script src="https://raw.githubusercontent.com/cagosta/requestAnimationFrame/master/app/requestAnimationFrame.js"></script>
  
    <!-- https://gist.github.com/jackwilsdon/d06bffa6b32c53321478 -->
  
    <script src="https://cdn.rawgit.com/jackwilsdon/d06bffa6b32c53321478/raw/4e467f82590e76543bf55ff788504e26afc3d694/game.js"></script>
    <script src="https://cdn.rawgit.com/jackwilsdon/d06bffa6b32c53321478/raw/2b7ce2c3dd268c4aef9ad27316edb0b235ad0d06/canvasgame.js"></script>
    <script src="https://cdn.rawgit.com/jackwilsdon/d06bffa6b32c53321478/raw/2696c72e001e48359a6ce880f1c475613fe359f5/jump.js"></script>
    <script src="https://cdn.rawgit.com/jackwilsdon/d06bffa6b32c53321478/raw/249c92f3385757b6edf2ceb49e26f14b89ffdcfe/bootstrap.js"></script>
</body>


Why are you using gamma = 1? Have you tried the more classic 0.9? - Demplo
Also, is your environment deterministic? That is, if the player jumps at the right moment, is it able to avoid the box? - Demplo
I've tried a gamma of 0.9 and there was no noticeable difference. The environment is deterministic, as the boxes appear at fixed intervals with no "randomness". If you jump a certain distance before a box, you are guaranteed to clear it. The Q values for "jump" and "do_nothing" seem to end up very close (if not identical), which doesn't seem right. - Jack Wilsdon
2 Answers

You basically have a simplified version of this:

[Image: Q-learning update rule]

Source: Flappy Bird RL

I used the following values:

    epsilon: {
        value: 0.01
    },
    alpha: {
        value: 0.7
    },
    gamma: {
        value: 0.9
    },
    resolution: {
        value: 0.1
    },  
    liveReward: {
        value: 10
    },
    scoreReward: {
        value: -100
    },
    deathReward: {
        value: 1000
    },

It easily gets past 100 within the first 20 or so attempts.


Q-learning can be described with temporal logic:

Q(s, a) = r(s, a) + gamma * max_a'(Q(s', a'))

It is executed as follows (a JavaScript sketch of the loop is given after the list):

  1. For each state-action pair (s, a), initialise the table entry Q(s, a) to zero
  2. Observe the current state s
  3. Do forever:
    • Select an action a and execute it
    • Receive the immediate reward r, i.e. r(s, a)
    • Observe the new state s'
    • Update the table entry Q(s, a) = r(s, a) + gamma * max_a'(Q(s', a'))
    • s = s'

Where:
  • r(s, a) = r = immediate reward
  • gamma = relative value of delayed versus immediate rewards (0 to 1)
  • s' = the new state after action a
  • a = an action in state s
  • a' = an action in state s'
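
As a minimal JavaScript sketch of those steps (the env object with reset(), step(action) and isDone(), and the purely random action selection, are assumptions for illustration and not part of the question's game code; it uses the deterministic update above, i.e. no learning rate):

function qLearn(env, actions, gamma, episodes) {
    var Q = {};

    function getQ(state) {
        if (!Q.hasOwnProperty(state)) {
            Q[state] = {};
            // Step 1: initialise every Q(s, a) to zero the first time s is seen.
            actions.forEach(function (action) { Q[state][action] = 0; });
        }
        return Q[state];
    }

    function maxQ(state) {
        // max over a' of Q(s', a')
        return Math.max.apply(null, actions.map(function (action) { return getQ(state)[action]; }));
    }

    for (var episode = 0; episode < episodes; episode++) {
        var s = env.reset(); // Step 2: observe the current state s.

        while (!env.isDone()) {
            // Select an action a and execute it (random here, for simplicity).
            var a = actions[Math.floor(Math.random() * actions.length)];

            // Receive the immediate reward r and observe the new state s'.
            var result = env.step(a);

            // Update the table entry Q(s, a) = r(s, a) + gamma * max_a'(Q(s', a')).
            getQ(s)[a] = result.reward + gamma * maxQ(result.nextState);

            s = result.nextState; // s = s'
        }
    }

    return Q;
}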

Note that scoreReward and deathReward should have opposite signs. - Margus
I based some of my code on Flappy Bird RL. Can you explain why the score reward is negative and the death reward is positive? I'd have thought it would be the other way around: trying to maximise the reward. - Jack Wilsdon
@jackwilsdon That's the unintuitive part; this greedy Q-learner works with the given parameters, but semantically (i.e. in terms of meaning) it isn't correct. - Margus
So is max_a' the maximum reward for state s'? How do you decide which a' to use in Q(s', a')? Do you use the same algorithm as the first step of "do forever" (select an action a and execute it)? - Jack Wilsdon


Your implementation of the algorithm is fine; it just needs some of its parameters tweaking.

If you assign some reward for staying alive, 10 in this example, and set epsilon to 0, you get a winning AI.

For example:

var JumpGameAIClass = function JumpGame(canvas) {
    Game.JumpGame.call(this, canvas);

    Object.defineProperties(this, {
        epsilon: {
            value: 0
        },

        alpha: {
            value: 1
        },

        gamma: {
            value: 1
        },

        resolution: {
            value: 0.1
        },

        actions: {
            value: [ 'jump', 'do_nothing' ]
        },

        Q: {
            value: { },
            writable: true
        },

        liveReward: {
            value: 0
        },

        scoreReward: {
            value: 100
        },

        deathReward: {
            value: -1000
        },

        lastAction: {
            value: 'do_nothing',
            writable: true
        },

        lastDistance: {
            value: 0,
            writable: true
        },

        lastScore: {
            value: 0,
            writable: true
        }
    });
};

JumpGameAIClass.prototype = Object.create(Game.JumpGame.prototype);

JumpGameAIClass.prototype.getQ = function getQ(state) {
    if (!this.Q.hasOwnProperty(state)) {
        this.Q[state] = {};

        for (var actionIndex = 0; actionIndex < this.actions.length; actionIndex++) {
            var action = this.actions[actionIndex];

            this.Q[state][action] = 0;
        }
    }

    return this.Q[state];
};

JumpGameAIClass.prototype.getBlockDistance = function getBlockDistance() {
    var closest = -1;

    for (var blockIndex = 0; blockIndex < this.blocks.length; blockIndex++) {
        var block = this.blocks[blockIndex];

        var distance = block.x - this.playerX;

        if (distance >= 0 && (closest === -1 || distance < closest)) {
            closest = distance;
        }
    }

    return Math.max(0, Math.floor(closest * this.resolution));
};

JumpGameAIClass.prototype.getActionWithHighestQ = function getActionWithHighestQ(distance) {
    var jumpReward = this.getQ(distance)[this.actions[0]];
    var doNothingReward = this.getQ(distance)[this.actions[1]];
    
    if (!this.canJump()) {
        return this.actions[1];
    } else if (jumpReward > doNothingReward) {
        return this.actions[0];
    } else if (doNothingReward > jumpReward) {
        return this.actions[1];
    } else {   
        return this.actions[Math.floor(Math.random() * this.actions.length)];
    }
};

JumpGameAIClass.prototype.getActionEpsilonGreedy = function getActionEpsilonGreedy() {
    if (!this.canJump()) {
        return this.actions[1];
    }

    if (Math.random() < this.epsilon) {
        return this.actions[Math.floor(Math.random() * this.actions.length)];
    } else {
        return this.getActionWithHighestQ(this.getBlockDistance());
    }
};

JumpGameAIClass.prototype.onDeath = function onDeath() {
    this.restart();
};

JumpGameAIClass.prototype.think = function think() {
    var reward = this.liveReward;

    if (this.score !== this.lastScore) {
        this.lastScore = this.score;
        reward = this.scoreReward;
    } else if (!this.playerAlive) {
        reward = this.deathReward;
    }

    this.drawDistance();

    var distance = this.getBlockDistance(),
        maxQ = this.playerAlive ? this.getQ(distance)[this.getActionWithHighestQ(distance)] : 0,
        previousQ = this.getQ(this.lastDistance)[this.lastAction];

    this.getQ(this.lastDistance)[this.lastAction] = previousQ + this.alpha * (reward + (this.gamma * maxQ) - previousQ);

    this.lastAction = this.getActionEpsilonGreedy();
    this.lastDistance = distance;

    switch (this.lastAction) {
        case this.actions[0]:
            this.jump();
            break;
    }
};

JumpGameAIClass.prototype.drawDistance = function drawDistance() {
    this.context.save();

    this.context.textAlign = 'center';
    this.context.textBaseline = 'bottom';

    this.context.fillText('Distance: ' + this.getBlockDistance(), this.canvasWidth / 2, this.canvasHeight / 4);

    this.context.textBaseline = 'top';

    this.context.fillText('Last Distance: ' + this.lastDistance, this.canvasWidth / 2, this.canvasHeight / 4);

    this.context.restore();
};

JumpGameAIClass.prototype.onFrame = function onFrame() {
    Game.JumpGame.prototype.onFrame.apply(this, arguments);

    this.think();
}

Game.JumpGameAI = JumpGameAIClass;
body {
    background-color: #EEEEEE;
    text-align: center;
}

canvas#game {
    background-color: #FFFFFF;
    border: 1px solid #DDDDDD;
}
<!DOCTYPE HTML>
<html lang="en">
<head>
    <title>jump</title>
</head>
<body>
    <canvas id="game" width="512" height="512">
        <h1>Your browser doesn't support canvas!</h1>
    </canvas>
  
    <script src="https://raw.githubusercontent.com/cagosta/requestAnimationFrame/master/app/requestAnimationFrame.js"></script>
  
    <!-- https://gist.github.com/jackwilsdon/d06bffa6b32c53321478 -->
  
    <script src="https://cdn.rawgit.com/jackwilsdon/d06bffa6b32c53321478/raw/4e467f82590e76543bf55ff788504e26afc3d694/game.js"></script>
    <script src="https://cdn.rawgit.com/jackwilsdon/d06bffa6b32c53321478/raw/2b7ce2c3dd268c4aef9ad27316edb0b235ad0d06/canvasgame.js"></script>
    <script src="https://cdn.rawgit.com/jackwilsdon/d06bffa6b32c53321478/raw/2696c72e001e48359a6ce880f1c475613fe359f5/jump.js"></script>
    <script src="https://cdn.rawgit.com/jackwilsdon/d06bffa6b32c53321478/raw/249c92f3385757b6edf2ceb49e26f14b89ffdcfe/bootstrap.js"></script>
</body>

Update:

Having thought about this some more, although my example appears to work, it isn't correct.

The problem is that, since the outcome of a jump isn't known until several iterations into the future, assigning an immediate reward for staying alive means that whatever arbitrary random decision was made in each state simply gets repeated until the result of that decision eventually propagates back to that state.

Because of the game physics, the player's jump distance is shorter than the spacing between blocks, which means a jump that clears a block lands further from the start of the next block than where the previous jump began, so the same jump can simply be made again. As a result, as long as a "good" jump is made before the first block, the system immediately converges on a successful pattern. If the game physics were different, or a "bad" jump were made, this AI might not be able to correct itself.

The underlying issue is that the system really has a two-part state, blockDistance and playerY. Without including playerY in the decision state, the outcome of a jump cannot correctly propagate back to its origin.
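
As a rough illustration of a composite state, here is a hypothetical getState() that folds an airborne flag (via canJump(), the only mid-air signal available in these snippets) in with the block distance. This is a sketch of the idea only, not something the examples on this page implement:

JumpGameAIClass.prototype.getState = function getState() {
    // canJump() is false while in mid-air, so it stands in for playerY here.
    return this.getBlockDistance() + (this.canJump() ? ':ground' : ':air');
};

getQ, getActionWithHighestQ and the update in think would then need to be keyed on this composite state rather than on the raw distance, so that the value of a jump can propagate back through the airborne states it passes through.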

In this simple game you can get around this by biasing towards doing nothing. Because the distance-based decision state is complete as long as you don't jump, the outcome of dying will correctly propagate back to the do-nothing decision at each distance. It's still a little odd, because once you do jump the reward propagation isn't correct, but you can now see it learning.

Example:

var JumpGameAIClass = function JumpGame(canvas) {
    Game.JumpGame.call(this, canvas);

    Object.defineProperties(this, {
        epsilon: {
            value: 0
        },

        alpha: {
            value: 1
        },

        gamma: {
            value: 1
        },

        resolution: {
            value: 0.1
        },

        actions: {
            value: [ 'jump', 'do_nothing' ]
        },

        Q: {
            value: { },
            writable: true
        },

        liveReward: {
            value: 10
        },

        scoreReward: {
            value: 100
        },

        deathReward: {
            value: -1000
        },

        lastAction: {
            value: 'do_nothing',
            writable: true
        },

        lastDistance: {
            value: 0,
            writable: true
        },

        lastScore: {
            value: 0,
            writable: true
        }
    });
};

JumpGameAIClass.prototype = Object.create(Game.JumpGame.prototype);

JumpGameAIClass.prototype.getQ = function getQ(state) {
    if (!this.Q.hasOwnProperty(state)) {
        this.Q[state] = {};

        for (var actionIndex = 0; actionIndex < this.actions.length; actionIndex++) {
            var action = this.actions[actionIndex];

            this.Q[state][action] = 0;
        }
    }

    return this.Q[state];
};

JumpGameAIClass.prototype.getBlockDistance = function getBlockDistance() {
    var closest = -1;

    for (var blockIndex = 0; blockIndex < this.blocks.length; blockIndex++) {
        var block = this.blocks[blockIndex];

        var distance = block.x - this.playerX;

        if (distance >= 0 && (closest === -1 || distance < closest)) {
            closest = distance;
        }
    }

    return Math.max(0, Math.floor(closest * this.resolution));
};

JumpGameAIClass.prototype.getActionWithHighestQ = function getActionWithHighestQ(distance) {
    var jumpReward = this.getQ(distance)[this.actions[0]];
    var doNothingReward = this.getQ(distance)[this.actions[1]];

    if (!this.canJump() || doNothingReward >= jumpReward) {
        return this.actions[1];
    } else {
        return this.actions[0];
    }
};

JumpGameAIClass.prototype.getActionEpsilonGreedy = function getActionEpsilonGreedy() {
    if (!this.canJump()) {
        return this.actions[1];
    }

    if (Math.random() < this.epsilon) {
        return this.actions[Math.floor(Math.random() * this.actions.length)];
    } else {
        return this.getActionWithHighestQ(this.getBlockDistance());
    }
};

JumpGameAIClass.prototype.onDeath = function onDeath() {
    this.restart();
};

JumpGameAIClass.prototype.think = function think() {
    var reward = this.liveReward;

    if (this.score !== this.lastScore) {
        this.lastScore = this.score;
        reward = this.scoreReward;
    } else if (!this.playerAlive) {
        reward = this.deathReward;
    }

    this.drawDistance();

    var distance = this.getBlockDistance(),
        maxQ = this.playerAlive ? this.getQ(distance)[this.getActionWithHighestQ(distance)] : 0,
        previousQ = this.getQ(this.lastDistance)[this.lastAction];

    this.getQ(this.lastDistance)[this.lastAction] = previousQ + this.alpha * (reward + (this.gamma * maxQ) - previousQ);

    this.lastAction = this.getActionEpsilonGreedy();
    this.lastDistance = distance;

    switch (this.lastAction) {
        case this.actions[0]:
            this.jump();
            break;
    }
};

JumpGameAIClass.prototype.drawDistance = function drawDistance() {
    this.context.save();

    this.context.textAlign = 'center';
    this.context.textBaseline = 'bottom';

    this.context.fillText('Distance: ' + this.getBlockDistance(), this.canvasWidth / 2, this.canvasHeight / 4);

    this.context.textBaseline = 'top';

    this.context.fillText('Last Distance: ' + this.lastDistance, this.canvasWidth / 2, this.canvasHeight / 4);

    this.context.restore();
};

JumpGameAIClass.prototype.onFrame = function onFrame() {
    Game.JumpGame.prototype.onFrame.apply(this, arguments);

    this.think();
}

Game.JumpGameAI = JumpGameAIClass;
body {
    background-color: #EEEEEE;
    text-align: center;
}

canvas#game {
    background-color: #FFFFFF;
    border: 1px solid #DDDDDD;
}
<!DOCTYPE HTML>
<html lang="en">
<head>
    <title>jump</title>
</head>
<body>
    <canvas id="game" width="512" height="512">
        <h1>Your browser doesn't support canvas!</h1>
    </canvas>
  
    <script src="https://raw.githubusercontent.com/cagosta/requestAnimationFrame/master/app/requestAnimationFrame.js"></script>
  
    <!-- https://gist.github.com/jackwilsdon/d06bffa6b32c53321478 -->
  
    <script src="https://cdn.rawgit.com/jackwilsdon/d06bffa6b32c53321478/raw/4e467f82590e76543bf55ff788504e26afc3d694/game.js"></script>
    <script src="https://cdn.rawgit.com/jackwilsdon/d06bffa6b32c53321478/raw/2b7ce2c3dd268c4aef9ad27316edb0b235ad0d06/canvasgame.js"></script>
    <script src="https://cdn.rawgit.com/jackwilsdon/d06bffa6b32c53321478/raw/2696c72e001e48359a6ce880f1c475613fe359f5/jump.js"></script>
    <script src="https://cdn.rawgit.com/jackwilsdon/d06bffa6b32c53321478/raw/249c92f3385757b6edf2ceb49e26f14b89ffdcfe/bootstrap.js"></script>
</body>

