Compare commits: master...feature/sc (32 commits)

Commit SHA1s:
a6a3372477, 3ffe0256cc, 712154f5b1, b473c83879, a071d879a1, 98975053f0,
61c55da887, 3687fbcfae, 99cbc7225c, bdfafa290c, 90cabe1fa3, 835d0f2833,
92d0fa6476, 36e1bba05b, a10c9c20b2, bc66b14208, 5c491f97a2, 8df28d413d,
a11ba9f2e7, c748f8968e, acad28b6bc, 8294bf4fb3, 2a8c451d9e, 799d2bdf59,
d577cd02ef, af8931e2db, 0c46807d6a, 9549c7f409, 9b1a6fa279, 4d551a3bfc,
da26a320b4, 51b0e65ad7

.github/workflows/push_dev.yml (vendored, 51 changed lines)
@@ -6,7 +6,41 @@ on:
      - master

jobs:
  build:
  authorizer-docker:
    runs-on: ubuntu-latest
    steps:
    - name: Checkout
      uses: actions/checkout@v2.1.0
    - name: Setup Dotnet
      uses: actions/setup-dotnet@v1
      with:
        dotnet-version: '3.1.100'
    - name: Install Packages
      working-directory: authorizer
      run: |
        dotnet add package StackExchange.Redis --version 2.1.39 --source https://www.myget.org/F/stackoverflow/api/v3/index.json

        # AWS SDK
        dotnet add package AWSSDK.Core --version 3.5.0-beta
        dotnet add package AWSSDK.ECS --version 3.5.0-beta
    - name: Build
      working-directory: authorizer
      run: |
        dotnet build --configuration Release
    - name: Push Tag to GitHub Package
      uses: opspresso/action-docker@master
      with:
        args: --docker
      env:
        USERNAME: ${{ github.actor }}
        PASSWORD: ${{ secrets.GITHUB_TOKEN }}
        REGISTRY: "docker.pkg.github.com"
        BUILD_PATH: "authorizer"
        DOCKERFILE: "authorizer/Dockerfile"
        IMAGE_NAME: "authorizer"
        TAG_NAME: dev-${{ github.actor }}
        LATEST: "false"
  cloudformation:
    runs-on: ubuntu-latest
    steps:
    - name: Checkout Repo

@@ -23,4 +57,17 @@ jobs:
        DEST_DIR: develop/cloudformation
        AWS_S3_BUCKET: dt-deployment-bucket
        AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
        AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
    - name: Configure AWS Credentials
      uses: aws-actions/configure-aws-credentials@v1
      with:
        aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
        aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
        aws-region: us-east-2
    - name: Deploy to AWS CloudFormation
      uses: aws-actions/aws-cloudformation-github-deploy@v1
      with:
        name: dt-infrastructure-dev-${{ github.actor }}
        template: infrastructure/cloudformation/dt/top.yaml
        capabilities: "CAPABILITY_NAMED_IAM,CAPABILITY_IAM"
        parameter-overrides: VpcId=${{ secrets.VPC_ID }},SubDomain=${{ github.actor }}.dev,Domain=${{ secrets.DOMAIN }},environment=${{ github.actor }},DockerTag=stage,release=develop,PublicSubnets=${{ secrets.SUBNET_IDS }}

.gitignore (vendored, 5 changed lines)
@@ -30,4 +30,7 @@
*.exe
*.out
*.app
.vscode

# Compressed Artifacts
*.zip

.gitmodules (vendored, 3 changed lines)
@@ -1,3 +1,6 @@
[submodule "infrastructure/cloudformation/cluster"]
    path = infrastructure/cloudformation/cluster
    url = git@github.com:josephbmanley/aws-cluster-stack.git
[submodule "infrastructure/cloudformation/redis"]
    path = infrastructure/cloudformation/redis
    url = git@github.com:josephbmanley/aws-redis-cluster.git

authorizer/.gitignore (vendored, new file, 37 lines)
@@ -0,0 +1,37 @@
*.swp
*.*~
project.lock.json
.DS_Store
*.pyc
nupkg/

# Visual Studio Code
.vscode

# Rider
.idea

# User-specific files
*.suo
*.user
*.userosscache
*.sln.docstates

# Build results
[Dd]ebug/
[Dd]ebugPublic/
[Rr]elease/
[Rr]eleases/
x64/
x86/
build/
bld/
[Bb]in/
[Oo]bj/
[Oo]ut/
msbuild.log
msbuild.err
msbuild.wrn

# Visual Studio 2015
.vs/

authorizer/Dockerfile (new file, 7 lines)
@@ -0,0 +1,7 @@
FROM mcr.microsoft.com/dotnet/core/runtime:3.1

COPY bin/Release/netcoreapp3.1/ App/
WORKDIR /App
ENTRYPOINT ["dotnet", "authorizer.dll"]

EXPOSE 7778/tcp

authorizer/Program.cs (new file, 24 lines)
@@ -0,0 +1,24 @@
using System;
using System.Threading;

namespace authorizer
{
    class Program
    {
        static AuthServer server;
        static void Main(string[] args)
        {
            server = new AuthServer();

            server.Start();

            string input;
            do
            {
                input = Console.ReadLine();
            }
            while(input != "stop");
            server.Stop();
        }
    }
}

authorizer/Redis.cs (new file, 32 lines)
@@ -0,0 +1,32 @@
using System;
using StackExchange.Redis;

class Redis
{
    private ConnectionMultiplexer muxer;
    public IDatabase conn;
    private string hostname;
    private int port;
    public Redis(string host = "127.0.0.1", int p = 6379)
    {
        // Also rejects null, e.g. when the REDIS_HOSTNAME env var is unset
        if(string.IsNullOrEmpty(host))
        {
            throw new Exception("Must provide a redis hostname!");
        }

        // Set private variables
        hostname = host;
        port = p;

        // Connect to redis cluster
        Console.WriteLine("Attempting to connect to: " + host + ":" + p.ToString());
        muxer = ConnectionMultiplexer.Connect(hostname + ":" + port.ToString());
        conn = muxer.GetDatabase();
        Console.WriteLine("Connected to redis server!");
    }

    ~Redis()
    {
        muxer.Close();
    }
}

authorizer/Server.cs (new file, 125 lines)
@@ -0,0 +1,125 @@
using System;
using System.Threading;
using System.Threading.Tasks;
using System.IO;
using System.Net;
using System.Net.Sockets;

using Amazon.ECS;
using Amazon.ECS.Model;
using System.Collections.Generic;
class AuthServer
{
    private bool running = false;
    private int port;
    private Thread thread;
    private IPAddress address;

    // Core objects
    private TcpListener server;
    private Redis redis;
    private AmazonECSClient ecs;


    public AuthServer(string addr = "0.0.0.0", int p = 7778)
    {
        port = p;
        address = IPAddress.Parse(addr);

        redis = new Redis(Environment.GetEnvironmentVariable("REDIS_HOSTNAME"));
        server = new TcpListener(address, port);
        ecs = new AmazonECSClient();
    }

    private Amazon.ECS.Model.Task GetTask(string task_arn)
    {
        // Builds ECS request
        DescribeTasksRequest r = new DescribeTasksRequest();
        List<string> tasks = new List<string>();
        tasks.Add(task_arn);
        r.Tasks = tasks;

        // Send DescribeTasks request and block until it completes.
        // (Task.RunSynchronously throws on promise-style tasks returned by
        // the SDK, so wait on the returned task instead.)
        var t = ecs.DescribeTasksAsync(r);
        t.Wait();

        // Return result
        return t.Result.Tasks[0];
    }
    private void ServerLoop()
    {
        while(running)
        {
            try
            {
                // Wait for connection
                TcpClient client = server.AcceptTcpClient();
                // Get remote address
                IPEndPoint endPoint = (IPEndPoint) client.Client.RemoteEndPoint;
                Console.WriteLine(endPoint.Address.ToString() + " connected!");

                // Create streams
                NetworkStream stream = client.GetStream();
                StreamWriter writer = new StreamWriter(stream);

                Byte[] bytes = new byte[256];
                String data = null;

                long num_of_tasks = redis.conn.ListLength("tasks");

                if(num_of_tasks > 0)
                {

                    var task = GetTask(redis.conn.ListGetByIndex("tasks", 0));
                    int port = task.Containers[0].NetworkBindings[0].HostPort;
                    string hostname = task.Containers[0].NetworkBindings[0].BindIP;

                    writer.Write("server:" + hostname + ":" + port.ToString());
                    writer.Flush();
                    Console.WriteLine("Routed client to " + hostname + ":" + port.ToString());
                }
                else
                {
                    string msg = "ERROR: No valid game server found!";
                    Console.WriteLine(msg);
                    writer.Write(msg);
                    writer.Flush();
                }

                // Read any client response
                int i;
                while((i = stream.Read(bytes, 0, bytes.Length)) != 0)
                {
                    data = System.Text.Encoding.ASCII.GetString(bytes, 0, i);
                    Console.WriteLine("Received: {0}", data);
                }

                client.Close();
                Console.WriteLine("Client disconnected");
            }
            catch(Exception e)
            {
                Console.WriteLine("Fatal exception: " + e.ToString());
            }
        }
    }

    public void Start()
    {
        server.Start();
        running = true;
        ThreadStart entrypoint = new ThreadStart(ServerLoop);
        thread = new Thread(entrypoint);
        thread.Start();
        Console.WriteLine("Waiting for a connection...");
    }
    public void Stop()
    {
        running = false;
    }
    public void ForceStop()
    {
        running = false;
        thread.Abort(); // Note: Thread.Abort is unsupported on .NET Core and throws PlatformNotSupportedException
    }
}

authorizer/authorizer.csproj (new file, 14 lines)
@@ -0,0 +1,14 @@
<Project Sdk="Microsoft.NET.Sdk">

  <PropertyGroup>
    <OutputType>Exe</OutputType>
    <TargetFramework>netcoreapp3.1</TargetFramework>
  </PropertyGroup>

  <ItemGroup>
    <PackageReference Include="AWSSDK.Core" Version="3.5.0-beta" />
    <PackageReference Include="AWSSDK.ECS" Version="3.5.0-beta" />
    <PackageReference Include="StackExchange.Redis" Version="2.1.39" />
  </ItemGroup>

</Project>

@@ -24,6 +24,7 @@ config/icon="res://icon.png"
MusicManager="*res://nodes/MusicManager.tscn"
NetworkManager="*res://nodes/NetworkManager.tscn"
ImportantEntities="*res://scripts/singletons/ImportantEntities.gd"
Authorizer="*res://scripts/network/Authorizer.gd"

[input]

client/scripts/network/Authorizer.gd (new file, 61 lines)
@@ -0,0 +1,61 @@
extends Node

signal auth_connected
signal auth_disconnected

var client : StreamPeerTCP = null
var server_hostname : String = "127.0.0.1"
var server_port = 7778

func _ready():
    client = StreamPeerTCP.new()
    client.set_no_delay(true)
    set_process(false)

func auth_connect(host=server_hostname, port=server_port):

    # Connect if not connected
    if !client.is_connected_to_host():
        server_hostname = host
        server_port = port

        # Connect socket & create stream
        client.connect_to_host(server_hostname, port)

        # Start listening
        set_process(true)

        # Validate initial connection
        if client.is_connected_to_host():
            client.put_string("Hey there daddy!")
            emit_signal("auth_connected")
            return true
        else:
            # Timeout implemented in `_process` loop
            print("Waiting for host connection...")
            return false
    else:
        print("Client is already connected to server!")
        return false

func auth_disconnect():
    client.disconnect_from_host()
    set_process(false) # Disable listening loop
    print_debug("Disconnected from host.")
    emit_signal("auth_disconnected")

var count = 0
func _process(delta):

    if client.get_available_bytes() > 0:
        print(client.get_available_bytes())

        print(client.get_string(client.get_available_bytes()))


    # Wait for the client connection to complete
    if client.get_status() == 1:
        count = count + delta
        if count > 1: # if it took more than 1s to connect, error
            print_debug("Failed to connect, disconnecting...")
            auth_disconnect() # interrupt the pending connection

@@ -1,17 +1,31 @@
extends Node

var auth

func _ready():
    $"/root/MusicManager".play_music("wizards")
    $Button.connect("button_down", self, "_on_button_press")
    $"/root/NetworkManager".connect("error_occured", self, "_on_error")
    $"/root/NetworkManager".connect("logged_in", self, "_on_login")
    auth = $"/root/Authorizer"
    auth.connect("auth_connected", self, "_on_auth_connection")
    auth.connect("auth_disconnected", self, "_on_auth_disconnection")


func _on_error():
    $ErrorDialog/ErrorLabel.text = $"/root/NetworkManager".error_info
    $ErrorDialog.popup_centered()

func _on_button_press():
    auth.auth_connect()

func _on_auth_connection():
    $Button.disabled = true

func _on_auth_disconnection():
    $Button.disabled = false

func _on_button_press_OLD():
    if($"/root/NetworkManager".connected):
        $"/root/NetworkManager".disconnect_from_server()
    else:

infrastructure/cloudformation/dt/auth_task.yaml (new file, 56 lines)
@@ -0,0 +1,56 @@
AWSTemplateFormatVersion: '2010-09-09'
Description: Defend Together ECS Task
Parameters:
  LogGroupName:
    Type: String
    Description: The AWS CloudWatch log group to output logs to.
    Default: "/ecs/dt"

  environment:
    Type: String
    Description: Name of the environment to use in naming.
    Default: production

  DockerTag:
    Description: Tag in DockerHub to deploy
    Type: String
    Default: "latest"

  RedisHostname:
    Description: Redis host to connect to
    Type: String

Resources:

  LogGroup:
    Type: AWS::Logs::LogGroup
    Properties:
      RetentionInDays: 7
      LogGroupName: !Sub "${LogGroupName}-auth/${environment}"

  TaskDefinition:
    Type: AWS::ECS::TaskDefinition
    Properties:
      ContainerDefinitions:
        - Name: defend-together-authorizer
          Essential: 'true'
          Image: !Sub "josephbmanley/defend-together-authorizer:${DockerTag}"
          MemoryReservation: 250
          PortMappings:
            - HostPort: 7778
              ContainerPort: 7778
              Protocol: tcp
          Environment:
            - Name: REDIS_HOSTNAME
              Value: !Ref RedisHostname
          LogConfiguration:
            LogDriver: awslogs
            Options:
              awslogs-region:
                Ref: AWS::Region
              awslogs-group:
                Ref: LogGroup
Outputs:
  TaskArn:
    Description: ARN of the TaskDefinition
    Value: !Ref TaskDefinition

infrastructure/cloudformation/dt/cloudwatch.yaml (new file, 49 lines)
@@ -0,0 +1,49 @@
AWSTemplateFormatVersion: "2010-09-09"
Description: DT CloudWatch stack
Parameters:
  #------------------------
  # Deployment Information
  #------------------------
  environment:
    Type: String
    Description: Name of the environment
    Default: production

  #----------------
  # ECS Information
  #----------------
  Cluster:
    Description: The ECS cluster to watch
    Type: String

  #-------------------
  # Lambda Information
  #-------------------
  LambdaArn:
    Description: Lambda function to call upon ecs task state change
    Type: String

Resources:

  CloudWatchLambdaPermission:
    Type: AWS::Lambda::Permission
    Properties:
      Action: lambda:InvokeFunction
      Principal: events.amazonaws.com
      SourceArn: !GetAtt TaskListRule.Arn
      FunctionName: !Ref LambdaArn

  TaskListRule:
    Type: AWS::Events::Rule
    Properties:
      EventPattern:
        source:
          - "aws.ecs"
        detail-type:
          - "ECS Task State Change"
        detail:
          clusterArn:
            - !Ref Cluster
      Targets:
        - Id: RedisUpdater
          Arn: !Ref LambdaArn

infrastructure/cloudformation/dt/iam.yaml (new file, 53 lines)
@@ -0,0 +1,53 @@
AWSTemplateFormatVersion: "2010-09-09"
Description: DT IAM stack
Parameters:
  #------------------------
  # Deployment Information
  #------------------------
  environment:
    Type: String
    Description: Name of the environment
    Default: production

Resources:
  DefaultLambdaRole:
    Type: AWS::IAM::Role
    Properties:
      AssumeRolePolicyDocument:
        Version: 2012-10-17
        Statement:
          - Effect: Allow
            Principal:
              Service:
                - lambda.amazonaws.com
            Action:
              - sts:AssumeRole
      Policies:
        - PolicyName: LambdaLogging
          PolicyDocument:
            Version: 2012-10-17
            Statement:
              - Effect: Allow
                Action:
                  - logs:CreateLogGroup
                  - logs:CreateLogStream
                  - logs:PutLogEvents
                Resource: "*"
        - PolicyName: AttachToVpc
          PolicyDocument:
            Version: 2012-10-17
            Statement:
              - Effect: Allow
                Action:
                  - ec2:CreateNetworkInterface
                  - ec2:DescribeNetworkInterfaces
                  - ec2:DeleteNetworkInterface
                  - ec2:DescribeSecurityGroups
                  - ec2:DescribeSubnets
                  - ec2:DescribeVpcs
                Resource: "*"

Outputs:
  DefaultRole:
    Description: Default lambda role with logging policy
    Value: !GetAtt DefaultLambdaRole.Arn

infrastructure/cloudformation/dt/lambdas.yaml (new file, 70 lines)
@@ -0,0 +1,70 @@
AWSTemplateFormatVersion: "2010-09-09"
Description: DT Lambdas stack
Parameters:
  #------------------------
  # Deployment Information
  #------------------------
  environment:
    Type: String
    Description: Name of the environment
    Default: production
  release:
    Type: String
    Description: Name of the release (stack version) to use.
    Default: production

  #----------------
  # IAM Information
  #----------------
  TaskManagerRole:
    Type: String
    Description: IAM role assumed by Task Manager Lambda
  VpcId:
    Type: AWS::EC2::VPC::Id
    Description: The id of the VPC the cluster will be in
    ConstraintDescription: VPC Id must begin with 'vpc-'
  SubnetIds:
    Type: List<AWS::EC2::Subnet::Id>
    Description: Comma-separated list of subnets for ECS instances to run in

Resources:

  TaskListSecurityGroup:
    Type: AWS::EC2::SecurityGroup
    Properties:
      GroupDescription: TaskListManagerLambda Allowed Ports
      VpcId: !Ref VpcId
      SecurityGroupIngress:
        - IpProtocol: tcp
          FromPort: '0'
          ToPort: '65535'
          CidrIp: 0.0.0.0/0
      SecurityGroupEgress:
        - IpProtocol: tcp
          FromPort: '0'
          ToPort: '65535'
          CidrIp: 0.0.0.0/0


  TaskListManagerLambda:
    Type: AWS::Lambda::Function
    Properties:
      Handler: lambda_function.lambda_handler
      Runtime: python3.7
      Code:
        S3Bucket: dt-deployment-bucket
        S3Key: !Sub "${release}/lambda/task_queue_manager.zip"
      FunctionName: !Sub "FnQueueManager-DT-${environment}"
      Description: Adds and removes tasks from a redis list
      MemorySize: 128
      Timeout: 10
      Role: !Ref TaskManagerRole
      VpcConfig:
        SecurityGroupIds:
          - !Ref TaskListSecurityGroup
        SubnetIds: !Ref SubnetIds

Outputs:
  TaskListManager:
    Value: !GetAtt TaskListManagerLambda.Arn
    Description: Function that adds and removes tasks from a redis list

@@ -61,20 +61,69 @@ Resources:
    Condition: CreateDns
    Type: AWS::CloudFormation::Stack
    Properties:
      TemplateURL: !Sub 'https://s3.${AWS::Region}.amazonaws.com/dt-deployment-bucket/${release}/cloudformation/dt/dns.yaml'
      TemplateURL: !Sub 'https://s3.us-east-2.amazonaws.com/dt-deployment-bucket/${release}/cloudformation/dt/dns.yaml'
      Parameters:
        environment: !Ref environment
        Domain: !Ref Domain
        SubDomain: !Ref SubDomain
        DtDNS: !GetAtt LoadBalancing.Outputs.NlbDnsName

  #-----
  # IAM
  #-----
  IAM:
    Type: AWS::CloudFormation::Stack
    Properties:
      TemplateURL: !Sub 'https://s3.us-east-2.amazonaws.com/dt-deployment-bucket/${release}/cloudformation/dt/iam.yaml'
      Parameters:
        environment: !Ref environment

  #--------
  # Lambda
  #--------
  LambdaFunctions:
    Type: AWS::CloudFormation::Stack
    Properties:
      TemplateURL: !Sub 'https://s3.us-east-2.amazonaws.com/dt-deployment-bucket/${release}/cloudformation/dt/lambdas.yaml'
      Parameters:
        environment: !Ref environment
        release: !Ref release
        TaskManagerRole: !GetAtt IAM.Outputs.DefaultRole
        VpcId: !Ref VpcId
        SubnetIds: !Join [",", !Split [" ", !Ref PublicSubnets]]

  #------------
  # CloudWatch
  #------------
  CloudWatchRules:
    Type: AWS::CloudFormation::Stack
    Properties:
      TemplateURL: !Sub 'https://s3.us-east-2.amazonaws.com/dt-deployment-bucket/${release}/cloudformation/dt/cloudwatch.yaml'
      Parameters:
        environment: !Ref environment
        Cluster: !GetAtt EcsCluster.Outputs.ClusterArn
        LambdaArn: !GetAtt LambdaFunctions.Outputs.TaskListManager

  #---------
  # Caching
  #---------
  RedisCache:
    Type: AWS::CloudFormation::Stack
    Properties:
      TemplateURL: !Sub 'https://s3.us-east-2.amazonaws.com/dt-deployment-bucket/${release}/cloudformation/redis/top.yaml'
      Parameters:
        Environment: !Ref environment
        VpcId: !Ref VpcId
        SubnetIds: !Join [",", !Split [" ", !Ref PublicSubnets]]
        Project: "DT"

  #-----------------
  # Load Balancing
  #-----------------
  LoadBalancing:
    Type: AWS::CloudFormation::Stack
    Properties:
      TemplateURL: !Sub 'https://s3.${AWS::Region}.amazonaws.com/dt-deployment-bucket/${release}/cloudformation/dt/load_balancing.yaml'
      TemplateURL: !Sub 'https://s3.us-east-2.amazonaws.com/dt-deployment-bucket/${release}/cloudformation/dt/load_balancing.yaml'
      Parameters:
        environment: !Ref environment
        release: !Ref release

@@ -84,20 +133,22 @@ Resources:
  EcsCluster:
    Type: AWS::CloudFormation::Stack
    Properties:
      TemplateURL: !Sub 'https://s3.${AWS::Region}.amazonaws.com/dt-deployment-bucket/${release}/cloudformation/cluster/top.yaml'
      TemplateURL: !Sub 'https://s3.us-east-2.amazonaws.com/dt-deployment-bucket/${release}/cloudformation/cluster/top.yaml'
      Parameters:
        Environment: !Ref environment
        VpcId: !Ref VpcId
        SubnetIds: !Join [",", !Split [" ", !Ref PublicSubnets]]
        Project: "DT"

  #-------------------
  # ECS Task & Service
  #-------------------
  #----------------------
  # ECS Tasks & Services
  #----------------------

  # Game Server
  TaskDefinition:
    Type: AWS::CloudFormation::Stack
    Properties:
      TemplateURL: !Sub 'https://s3.${AWS::Region}.amazonaws.com/dt-deployment-bucket/${release}/cloudformation/dt/task.yaml'
      TemplateURL: !Sub 'https://s3.us-east-2.amazonaws.com/dt-deployment-bucket/${release}/cloudformation/dt/server_task.yaml'
      Parameters:
        environment: !Ref environment
        LogGroupName: !Ref LogGroup

@@ -111,7 +162,21 @@ Resources:
      Cluster: !GetAtt EcsCluster.Outputs.Cluster
      DesiredCount: 1
      TaskDefinition: !GetAtt TaskDefinition.Outputs.TaskArn
      LoadBalancers:
        - ContainerName: "defend-together"
          ContainerPort: 7777
          TargetGroupArn: !GetAtt LoadBalancing.Outputs.TargetGroup

  # Auth Server
  AuthTaskDefinition:
    Type: AWS::CloudFormation::Stack
    Properties:
      TemplateURL: !Sub 'https://s3.us-east-2.amazonaws.com/dt-deployment-bucket/${release}/cloudformation/dt/auth_task.yaml'
      Parameters:
        environment: !Ref environment
        LogGroupName: !Ref LogGroup
        DockerTag: !Ref DockerTag
        RedisHostname: !GetAtt RedisCache.Outputs.Endpoint

  AuthService:
    Type: AWS::ECS::Service
    Properties:
      Cluster: !GetAtt EcsCluster.Outputs.Cluster
      DesiredCount: 1
      TaskDefinition: !GetAtt AuthTaskDefinition.Outputs.TaskArn

infrastructure/cloudformation/redis (submodule, 1 line)
@@ -0,0 +1 @@
Subproject commit b051e15fa09b8e6f764a17d7fc35ae6e74e630e1

infrastructure/lambda/task_queue_manager/lambda_function.py (new file, 15 lines)
@@ -0,0 +1,15 @@
import redis
import json, os

def lambda_handler(event, context):
    r = redis.Redis(host=os.environ['REDIS_HOST'], port=6379, db=0)

    if event["detail"]["group"] == "service:" + os.environ["ECS_SERVICE"]:
        desired = event["detail"]["desiredStatus"]
        last = event["detail"]["lastStatus"]
        if desired == "RUNNING" and desired == last:
            print("Added task: " + event["detail"]["taskArn"])
            r.lpush("tasks", event["detail"]["taskArn"])
        elif desired == "STOPPED" or last == "STOPPED":
            # redis-py 3.x lrem signature is (name, count, value)
            r.lrem("tasks", 1, event["detail"]["taskArn"])
            print("Removed task: " + event["detail"]["taskArn"])
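
For reference, a minimal local exercise of this handler (an illustrative sketch only: the event below is a trimmed, made-up ECS Task State Change payload, and it assumes REDIS_HOST/ECS_SERVICE are set and a redis server is reachable):

    import os
    os.environ.setdefault("REDIS_HOST", "127.0.0.1")   # assumed test value
    os.environ.setdefault("ECS_SERVICE", "dt-server")  # assumed test value

    event = {"detail": {
        "group": "service:" + os.environ["ECS_SERVICE"],
        "desiredStatus": "RUNNING",
        "lastStatus": "RUNNING",
        "taskArn": "arn:aws:ecs:us-east-2:123456789012:task/example",  # made up
    }}
    lambda_handler(event, None)  # pushes the made-up taskArn onto the "tasks" list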

@@ -0,0 +1 @@
pip

@@ -0,0 +1,22 @@
Copyright (c) 2012 Andy McCurdy

Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.

@@ -0,0 +1,927 @@
Metadata-Version: 2.1
Name: redis
Version: 3.5.2
Summary: Python client for Redis key-value store
Home-page: https://github.com/andymccurdy/redis-py
Author: Andy McCurdy
Author-email: sedrik@gmail.com
Maintainer: Andy McCurdy
Maintainer-email: sedrik@gmail.com
License: MIT
Keywords: Redis,key-value store
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: Console
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*
Provides-Extra: hiredis
Requires-Dist: hiredis (>=0.1.3) ; extra == 'hiredis'

redis-py
========

The Python interface to the Redis key-value store.

.. image:: https://secure.travis-ci.org/andymccurdy/redis-py.svg?branch=master
   :target: https://travis-ci.org/andymccurdy/redis-py
.. image:: https://readthedocs.org/projects/redis-py/badge/?version=stable&style=flat
   :target: https://redis-py.readthedocs.io/en/stable/
.. image:: https://badge.fury.io/py/redis.svg
   :target: https://pypi.org/project/redis/
.. image:: https://codecov.io/gh/andymccurdy/redis-py/branch/master/graph/badge.svg
   :target: https://codecov.io/gh/andymccurdy/redis-py


Python 2 Compatibility Note
---------------------------

redis-py 3.5.x will be the last version of redis-py that supports Python 2.
The 3.5.x line will continue to get bug fixes and security patches that
support Python 2 until August 1, 2020. redis-py 4.0 will be the next major
version and will require Python 3.5+.


Installation
------------

redis-py requires a running Redis server. See `Redis's quickstart
<https://redis.io/topics/quickstart>`_ for installation instructions.

redis-py can be installed using `pip` similar to other Python packages. Do not use `sudo`
with `pip`. It is usually good to work in a
`virtualenv <https://virtualenv.pypa.io/en/latest/>`_ or
`venv <https://docs.python.org/3/library/venv.html>`_ to avoid conflicts with other package
managers and Python projects. For a quick introduction see
`Python Virtual Environments in Five Minutes <https://bit.ly/py-env>`_.

To install redis-py, simply:

.. code-block:: bash

    $ pip install redis

or from source:

.. code-block:: bash

    $ python setup.py install


Getting Started
---------------

.. code-block:: pycon

    >>> import redis
    >>> r = redis.Redis(host='localhost', port=6379, db=0)
    >>> r.set('foo', 'bar')
    True
    >>> r.get('foo')
    b'bar'

By default, all responses are returned as `bytes` in Python 3 and `str` in
Python 2. The user is responsible for decoding to Python 3 strings or Python 2
unicode objects.

If **all** string responses from a client should be decoded, the user can
specify `decode_responses=True` to `Redis.__init__`. In this case, any
Redis command that returns a string type will be decoded with the `encoding`
specified.


Upgrading from redis-py 2.X to 3.0
----------------------------------

redis-py 3.0 introduces many new features but required a number of backwards
incompatible changes to be made in the process. This section attempts to
provide an upgrade path for users migrating from 2.X to 3.0.


Python Version Support
^^^^^^^^^^^^^^^^^^^^^^

redis-py 3.0 supports Python 2.7 and Python 3.5+.


Client Classes: Redis and StrictRedis
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

redis-py 3.0 drops support for the legacy "Redis" client class. "StrictRedis"
has been renamed to "Redis" and an alias named "StrictRedis" is provided so
that users previously using "StrictRedis" can continue to run unchanged.

The 2.X "Redis" class provided alternative implementations of a few commands.
This confused users (rightfully so) and caused a number of support issues. To
make things easier going forward, it was decided to drop support for these
alternate implementations and instead focus on a single client class.

2.X users that are already using StrictRedis don't have to change the class
name. StrictRedis will continue to work for the foreseeable future.

2.X users that are using the Redis class will have to make changes if they
use any of the following commands:

* SETEX: The argument order has changed. The new order is (name, time, value).
* LREM: The argument order has changed. The new order is (name, num, value).
* TTL and PTTL: The return value is now always an int and matches the
  official Redis command (>0 indicates the timeout, -1 indicates that the key
  exists but that it has no expire time set, -2 indicates that the key does
  not exist)
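
For a quick side-by-side, here is an illustrative sketch of the 3.0 argument
orders (the key names and values below are invented for the example):

.. code-block:: pycon

    >>> r.setex('session', 10, 'token')  # 3.0 order: (name, time, value)
    True
    >>> r.lrem('queue', 1, 'job-42')     # 3.0 order: (name, num, value)
    0
    >>> r.ttl('missing-key')             # always an int in 3.0
    -2
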
SSL Connections
^^^^^^^^^^^^^^^

redis-py 3.0 changes the default value of the `ssl_cert_reqs` option from
`None` to `'required'`. See
`Issue 1016 <https://github.com/andymccurdy/redis-py/issues/1016>`_. This
change enforces hostname validation when accepting a cert from a remote SSL
terminator. If the terminator doesn't properly set the hostname on the cert
this will cause redis-py 3.0 to raise a ConnectionError.

This check can be disabled by setting `ssl_cert_reqs` to `None`. Note that
doing so removes the security check. Do so at your own risk.

It has been reported that SSL certs received from AWS ElastiCache do not have
proper hostnames and turning off hostname verification is currently required.


MSET, MSETNX and ZADD
^^^^^^^^^^^^^^^^^^^^^

These commands all accept a mapping of key/value pairs. In redis-py 2.X
this mapping could be specified as ``*args`` or as ``**kwargs``. Both of these
styles caused issues when Redis introduced optional flags to ZADD. Relying on
``*args`` caused issues with the optional argument order, especially in Python
2.7. Relying on ``**kwargs`` caused potential collision issues of user keys with
the argument names in the method signature.

To resolve this, redis-py 3.0 has changed these three commands to all accept
a single positional argument named mapping that is expected to be a dict. For
MSET and MSETNX, the dict is a mapping of key-names -> values. For ZADD, the
dict is a mapping of element-names -> score.

MSET, MSETNX and ZADD now look like:

.. code-block:: python

    def mset(self, mapping):
    def msetnx(self, mapping):
    def zadd(self, name, mapping, nx=False, xx=False, ch=False, incr=False):

All 2.X users that use these commands must modify their code to supply
keys and values as a dict to these commands.
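
As an illustrative sketch of the dict-based style (key and member names are
invented for the example):

.. code-block:: pycon

    >>> r.mset({'k1': 'v1', 'k2': 'v2'})
    True
    >>> r.zadd('scores', {'alice': 10.0, 'bob': 7.5})
    2
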
ZINCRBY
^^^^^^^

redis-py 2.X accidentally modified the argument order of ZINCRBY, swapping the
order of value and amount. ZINCRBY now looks like:

.. code-block:: python

    def zincrby(self, name, amount, value):

All 2.X users that rely on ZINCRBY must swap the order of amount and value
for the command to continue to work as intended.
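
Continuing the invented 'scores' example from above, the 3.0 call order is:

.. code-block:: pycon

    >>> r.zincrby('scores', 2.0, 'alice')  # amount first, then value
    12.0
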
Encoding of User Input
^^^^^^^^^^^^^^^^^^^^^^

redis-py 3.0 only accepts user data as bytes, strings or numbers (ints, longs
and floats). Attempting to specify a key or a value as any other type will
raise a DataError exception.

redis-py 2.X attempted to coerce any type of input into a string. While
occasionally convenient, this caused all sorts of hidden errors when users
passed boolean values (which were coerced to 'True' or 'False'), a None
value (which was coerced to 'None') or other values, such as user defined
types.

All 2.X users should make sure that the keys and values they pass into
redis-py are either bytes, strings or numbers.
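
A small sketch of the new strictness (key names are invented and the
traceback is abbreviated):

.. code-block:: pycon

    >>> r.set('count', 42)   # numbers are fine
    True
    >>> r.set('flag', True)  # booleans are rejected in 3.0
    Traceback (most recent call last):
      ...
    redis.exceptions.DataError
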
Locks
^^^^^

redis-py 3.0 drops support for the pipeline-based Lock and now only supports
the Lua-based lock. In doing so, LuaLock has been renamed to Lock. This also
means that redis-py Lock objects require Redis server 2.6 or greater.

2.X users that were explicitly referring to "LuaLock" will have to now refer
to "Lock" instead.


Locks as Context Managers
^^^^^^^^^^^^^^^^^^^^^^^^^

redis-py 3.0 now raises a LockError when using a lock as a context manager and
the lock cannot be acquired within the specified timeout. This is more of a
bug fix than a backwards incompatible change. However, given an error is now
raised where none was before, this might alarm some users.

2.X users should make sure they're wrapping their lock code in a try/catch
like this:

.. code-block:: python

    try:
        with r.lock('my-lock-key', blocking_timeout=5) as lock:
            # code you want executed only after the lock has been acquired
    except LockError:
        # the lock wasn't acquired


API Reference
-------------

The `official Redis command documentation <https://redis.io/commands>`_ does a
great job of explaining each command in detail. redis-py attempts to adhere
to the official command syntax. There are a few exceptions:

* **SELECT**: Not implemented. See the explanation in the Thread Safety section
  below.
* **DEL**: 'del' is a reserved keyword in the Python syntax. Therefore redis-py
  uses 'delete' instead.
* **MULTI/EXEC**: These are implemented as part of the Pipeline class. The
  pipeline is wrapped with the MULTI and EXEC statements by default when it
  is executed, which can be disabled by specifying transaction=False.
  See more about Pipelines below.
* **SUBSCRIBE/LISTEN**: Similar to pipelines, PubSub is implemented as a separate
  class as it places the underlying connection in a state where it can't
  execute non-pubsub commands. Calling the pubsub method from the Redis client
  will return a PubSub instance where you can subscribe to channels and listen
  for messages. You can only call PUBLISH from the Redis client (see
  `this comment on issue #151
  <https://github.com/andymccurdy/redis-py/issues/151#issuecomment-1545015>`_
  for details).
* **SCAN/SSCAN/HSCAN/ZSCAN**: The \*SCAN commands are implemented as they
  exist in the Redis documentation. In addition, each command has an equivalent
  iterator method. These are purely for convenience so the user doesn't have
  to keep track of the cursor while iterating. Use the
  scan_iter/sscan_iter/hscan_iter/zscan_iter methods for this behavior.


More Detail
-----------

Connection Pools
^^^^^^^^^^^^^^^^

Behind the scenes, redis-py uses a connection pool to manage connections to
a Redis server. By default, each Redis instance you create will in turn create
its own connection pool. You can override this behavior and use an existing
connection pool by passing an already created connection pool instance to the
connection_pool argument of the Redis class. You may choose to do this in order
to implement client side sharding or have fine-grained control of how
connections are managed.

.. code-block:: pycon

    >>> pool = redis.ConnectionPool(host='localhost', port=6379, db=0)
    >>> r = redis.Redis(connection_pool=pool)

Connections
^^^^^^^^^^^

ConnectionPools manage a set of Connection instances. redis-py ships with two
types of Connections. The default, Connection, is a normal TCP socket based
connection. The UnixDomainSocketConnection allows for clients running on the
same device as the server to connect via a unix domain socket. To use a
UnixDomainSocketConnection connection, simply pass the unix_socket_path
argument, which is a string to the unix domain socket file. Additionally, make
sure the unixsocket parameter is defined in your redis.conf file. It's
commented out by default.

.. code-block:: pycon

    >>> r = redis.Redis(unix_socket_path='/tmp/redis.sock')

You can create your own Connection subclasses as well. This may be useful if
you want to control the socket behavior within an async framework. To
instantiate a client class using your own connection, you need to create
a connection pool, passing your class to the connection_class argument.
Other keyword parameters you pass to the pool will be passed to the class
specified during initialization.

.. code-block:: pycon

    >>> pool = redis.ConnectionPool(connection_class=YourConnectionClass,
                                    your_arg='...', ...)

Connections maintain an open socket to the Redis server. Sometimes these
sockets are interrupted or disconnected for a variety of reasons. For example,
network appliances, load balancers and other services that sit between clients
and servers are often configured to kill connections that remain idle for a
given threshold.

When a connection becomes disconnected, the next command issued on that
connection will fail and redis-py will raise a ConnectionError to the caller.
This allows each application that uses redis-py to handle errors in a way
that's fitting for that specific application. However, constant error
handling can be verbose and cumbersome, especially when socket disconnections
happen frequently in many production environments.

To combat this, redis-py can issue regular health checks to assess the
liveliness of a connection just before issuing a command. Users can pass
``health_check_interval=N`` to the Redis or ConnectionPool classes or
as a query argument within a Redis URL. The value of ``health_check_interval``
must be an integer. A value of ``0``, the default, disables health checks.
Any positive integer will enable health checks. Health checks are performed
just before a command is executed if the underlying connection has been idle
for more than ``health_check_interval`` seconds. For example,
``health_check_interval=30`` will ensure that a health check is run on any
connection that has been idle for 30 or more seconds just before a command
is executed on that connection.

If your application is running in an environment that disconnects idle
connections after 30 seconds you should set the ``health_check_interval``
option to a value less than 30.

This option also works on any PubSub connection that is created from a
client with ``health_check_interval`` enabled. PubSub users need to ensure
that ``get_message()`` or ``listen()`` are called more frequently than
``health_check_interval`` seconds. It is assumed that most workloads already
do this.

If your PubSub use case doesn't call ``get_message()`` or ``listen()``
frequently, you should call ``pubsub.check_health()`` explicitly on a
regular basis.
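
An illustrative way to enable health checks (the interval below is an
arbitrary example value):

.. code-block:: pycon

    >>> r = redis.Redis(host='localhost', port=6379, health_check_interval=25)
    >>> # or as a query argument within a Redis URL
    >>> r = redis.Redis.from_url('redis://localhost:6379/0?health_check_interval=25')
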
Parsers
^^^^^^^

Parser classes provide a way to control how responses from the Redis server
are parsed. redis-py ships with two parser classes, the PythonParser and the
HiredisParser. By default, redis-py will attempt to use the HiredisParser if
you have the hiredis module installed and will fallback to the PythonParser
otherwise.

Hiredis is a C library maintained by the core Redis team. Pieter Noordhuis was
kind enough to create Python bindings. Using Hiredis can provide up to a
10x speed improvement in parsing responses from the Redis server. The
performance increase is most noticeable when retrieving many pieces of data,
such as from LRANGE or SMEMBERS operations.

Hiredis is available on PyPI, and can be installed via pip just like redis-py.

.. code-block:: bash

    $ pip install hiredis

Response Callbacks
^^^^^^^^^^^^^^^^^^

The client class uses a set of callbacks to cast Redis responses to the
appropriate Python type. There are a number of these callbacks defined on
the Redis client class in a dictionary called RESPONSE_CALLBACKS.

Custom callbacks can be added on a per-instance basis using the
set_response_callback method. This method accepts two arguments: a command
name and the callback. Callbacks added in this manner are only valid on the
instance the callback is added to. If you want to define or override a callback
globally, you should make a subclass of the Redis client and add your callback
to its RESPONSE_CALLBACKS class dictionary.

Response callbacks take at least one parameter: the response from the Redis
server. Keyword arguments may also be accepted in order to further control
how to interpret the response. These keyword arguments are specified during the
command's call to execute_command. The ZRANGE implementation demonstrates the
use of response callback keyword arguments with its "withscores" argument.
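
For illustration, a per-instance callback that casts GET replies to ints
(the command choice and key name are invented for the example):

.. code-block:: pycon

    >>> r.set_response_callback('GET', lambda response: int(response))
    >>> r.set('visits', 10)
    True
    >>> r.get('visits')
    10
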
Thread Safety
^^^^^^^^^^^^^

Redis client instances can safely be shared between threads. Internally,
connection instances are only retrieved from the connection pool during
command execution, and returned to the pool directly after. Command execution
never modifies state on the client instance.

However, there is one caveat: the Redis SELECT command. The SELECT command
allows you to switch the database currently in use by the connection. That
database remains selected until another is selected or until the connection is
closed. This creates an issue in that connections could be returned to the pool
that are connected to a different database.

As a result, redis-py does not implement the SELECT command on client
instances. If you use multiple Redis databases within the same application, you
should create a separate client instance (and possibly a separate connection
pool) for each database.

It is not safe to pass PubSub or Pipeline objects between threads.
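
A sketch of the separate-client-per-database pattern (the database numbers
below are example values):

.. code-block:: pycon

    >>> cache = redis.Redis(host='localhost', port=6379, db=0)
    >>> sessions = redis.Redis(host='localhost', port=6379, db=1)
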
Pipelines
|
||||
^^^^^^^^^
|
||||
|
||||
Pipelines are a subclass of the base Redis class that provide support for
|
||||
buffering multiple commands to the server in a single request. They can be used
|
||||
to dramatically increase the performance of groups of commands by reducing the
|
||||
number of back-and-forth TCP packets between the client and server.
|
||||
|
||||
Pipelines are quite simple to use:
|
||||
|
||||
.. code-block:: pycon
|
||||
|
||||
>>> r = redis.Redis(...)
|
||||
>>> r.set('bing', 'baz')
|
||||
>>> # Use the pipeline() method to create a pipeline instance
|
||||
>>> pipe = r.pipeline()
|
||||
>>> # The following SET commands are buffered
|
||||
>>> pipe.set('foo', 'bar')
|
||||
>>> pipe.get('bing')
|
||||
>>> # the EXECUTE call sends all buffered commands to the server, returning
|
||||
>>> # a list of responses, one for each command.
|
||||
>>> pipe.execute()
|
||||
[True, b'baz']
|
||||
|
||||
For ease of use, all commands being buffered into the pipeline return the
|
||||
pipeline object itself. Therefore calls can be chained like:
|
||||
|
||||
.. code-block:: pycon
|
||||
|
||||
>>> pipe.set('foo', 'bar').sadd('faz', 'baz').incr('auto_number').execute()
|
||||
[True, True, 6]
|
||||
|
||||
In addition, pipelines can also ensure the buffered commands are executed
|
||||
atomically as a group. This happens by default. If you want to disable the
|
||||
atomic nature of a pipeline but still want to buffer commands, you can turn
|
||||
off transactions.
|
||||
|
||||
.. code-block:: pycon
|
||||
|
||||
>>> pipe = r.pipeline(transaction=False)
|
||||
|
||||
A common issue occurs when requiring atomic transactions but needing to
|
||||
retrieve values in Redis prior for use within the transaction. For instance,
|
||||
let's assume that the INCR command didn't exist and we need to build an atomic
|
||||
version of INCR in Python.
|
||||
|
||||
The completely naive implementation could GET the value, increment it in
|
||||
Python, and SET the new value back. However, this is not atomic because
|
||||
multiple clients could be doing this at the same time, each getting the same
|
||||
value from GET.
|
||||
|
||||
Enter the WATCH command. WATCH provides the ability to monitor one or more keys
|
||||
prior to starting a transaction. If any of those keys change prior the
|
||||
execution of that transaction, the entire transaction will be canceled and a
|
||||
WatchError will be raised. To implement our own client-side INCR command, we
|
||||
could do something like this:
|
||||
|
||||
.. code-block:: pycon
|
||||
|
||||
>>> with r.pipeline() as pipe:
|
||||
... while True:
|
||||
... try:
|
||||
... # put a WATCH on the key that holds our sequence value
|
||||
... pipe.watch('OUR-SEQUENCE-KEY')
|
||||
... # after WATCHing, the pipeline is put into immediate execution
|
||||
... # mode until we tell it to start buffering commands again.
|
||||
... # this allows us to get the current value of our sequence
|
||||
... current_value = pipe.get('OUR-SEQUENCE-KEY')
|
||||
... next_value = int(current_value) + 1
|
||||
... # now we can put the pipeline back into buffered mode with MULTI
|
||||
... pipe.multi()
|
||||
... pipe.set('OUR-SEQUENCE-KEY', next_value)
|
||||
... # and finally, execute the pipeline (the set command)
|
||||
... pipe.execute()
|
||||
... # if a WatchError wasn't raised during execution, everything
|
||||
... # we just did happened atomically.
|
||||
... break
|
||||
... except WatchError:
|
||||
... # another client must have changed 'OUR-SEQUENCE-KEY' between
|
||||
... # the time we started WATCHing it and the pipeline's execution.
|
||||
... # our best bet is to just retry.
|
||||
... continue
|
||||
|
||||
Note that, because the Pipeline must bind to a single connection for the
|
||||
duration of a WATCH, care must be taken to ensure that the connection is
|
||||
returned to the connection pool by calling the reset() method. If the
|
||||
Pipeline is used as a context manager (as in the example above) reset()
|
||||
will be called automatically. Of course you can do this the manual way by
|
||||
explicitly calling reset():
|
||||
|
||||
.. code-block:: pycon
|
||||
|
||||
>>> pipe = r.pipeline()
|
||||
>>> while True:
|
||||
... try:
|
||||
... pipe.watch('OUR-SEQUENCE-KEY')
|
||||
... ...
|
||||
... pipe.execute()
|
||||
... break
|
||||
... except WatchError:
|
||||
... continue
|
||||
... finally:
|
||||
... pipe.reset()
|
||||
|
||||
A convenience method named "transaction" exists for handling all the
|
||||
boilerplate of handling and retrying watch errors. It takes a callable that
|
||||
should expect a single parameter, a pipeline object, and any number of keys to
|
||||
be WATCHed. Our client-side INCR command above can be written like this,
|
||||
which is much easier to read:
|
||||
|
||||
.. code-block:: pycon
|
||||
|
||||
>>> def client_side_incr(pipe):
|
||||
... current_value = pipe.get('OUR-SEQUENCE-KEY')
|
||||
... next_value = int(current_value) + 1
|
||||
... pipe.multi()
|
||||
... pipe.set('OUR-SEQUENCE-KEY', next_value)
|
||||
>>>
|
||||
>>> r.transaction(client_side_incr, 'OUR-SEQUENCE-KEY')
|
||||
[True]
|
||||
|
||||
Be sure to call `pipe.multi()` in the callable passed to `Redis.transaction`
|
||||
prior to any write commands.
|
||||
|
||||
Publish / Subscribe
|
||||
^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
redis-py includes a `PubSub` object that subscribes to channels and listens
|
||||
for new messages. Creating a `PubSub` object is easy.
|
||||
|
||||
.. code-block:: pycon
|
||||
|
||||
>>> r = redis.Redis(...)
|
||||
>>> p = r.pubsub()
|
||||
|
||||
Once a `PubSub` instance is created, channels and patterns can be subscribed
|
||||
to.
|
||||
|
||||
.. code-block:: pycon
|
||||
|
||||
>>> p.subscribe('my-first-channel', 'my-second-channel', ...)
|
||||
>>> p.psubscribe('my-*', ...)
|
||||
|
||||
The `PubSub` instance is now subscribed to those channels/patterns. The
|
||||
subscription confirmations can be seen by reading messages from the `PubSub`
|
||||
instance.
|
||||
|
||||
.. code-block:: pycon
|
||||
|
||||
>>> p.get_message()
|
||||
{'pattern': None, 'type': 'subscribe', 'channel': b'my-second-channel', 'data': 1}
|
||||
>>> p.get_message()
|
||||
{'pattern': None, 'type': 'subscribe', 'channel': b'my-first-channel', 'data': 2}
|
||||
>>> p.get_message()
|
||||
{'pattern': None, 'type': 'psubscribe', 'channel': b'my-*', 'data': 3}
|
||||
|
||||
Every message read from a `PubSub` instance will be a dictionary with the
|
||||
following keys.
|
||||
|
||||
* **type**: One of the following: 'subscribe', 'unsubscribe', 'psubscribe',
|
||||
'punsubscribe', 'message', 'pmessage'
|
||||
* **channel**: The channel [un]subscribed to or the channel a message was
|
||||
published to
|
||||
* **pattern**: The pattern that matched a published message's channel. Will be
|
||||
`None` in all cases except for 'pmessage' types.
|
||||
* **data**: The message data. With [un]subscribe messages, this value will be
|
||||
the number of channels and patterns the connection is currently subscribed
|
||||
to. With [p]message messages, this value will be the actual published
|
||||
message.
|
||||
|
||||
Let's send a message now.
|
||||
|
||||
.. code-block:: pycon
|
||||
|
||||
# the publish method returns the number matching channel and pattern
|
||||
# subscriptions. 'my-first-channel' matches both the 'my-first-channel'
|
||||
# subscription and the 'my-*' pattern subscription, so this message will
|
||||
# be delivered to 2 channels/patterns
|
||||
>>> r.publish('my-first-channel', 'some data')
|
||||
2
|
||||
>>> p.get_message()
|
||||
{'channel': b'my-first-channel', 'data': b'some data', 'pattern': None, 'type': 'message'}
|
||||
>>> p.get_message()
|
||||
{'channel': b'my-first-channel', 'data': b'some data', 'pattern': b'my-*', 'type': 'pmessage'}
|
||||
|
||||
Unsubscribing works just like subscribing. If no arguments are passed to
|
||||
[p]unsubscribe, all channels or patterns will be unsubscribed from.
|
||||
|
||||
.. code-block:: pycon
|
||||
|
||||
>>> p.unsubscribe()
|
||||
>>> p.punsubscribe('my-*')
|
||||
>>> p.get_message()
|
||||
{'channel': b'my-second-channel', 'data': 2, 'pattern': None, 'type': 'unsubscribe'}
|
||||
>>> p.get_message()
|
||||
{'channel': b'my-first-channel', 'data': 1, 'pattern': None, 'type': 'unsubscribe'}
|
||||
>>> p.get_message()
|
||||
{'channel': b'my-*', 'data': 0, 'pattern': None, 'type': 'punsubscribe'}

redis-py also allows you to register callback functions to handle published
messages. Message handlers take a single argument, the message, which is a
dictionary just like the examples above. To subscribe to a channel or pattern
with a message handler, pass the channel or pattern name as a keyword argument
with its value being the callback function.

When a message is read on a channel or pattern with a message handler, the
message dictionary is created and passed to the message handler. In this case,
a `None` value is returned from get_message() since the message was already
handled.

.. code-block:: pycon

    >>> def my_handler(message):
    ...     print('MY HANDLER: ', message['data'])
    >>> p.subscribe(**{'my-channel': my_handler})
    # read the subscribe confirmation message
    >>> p.get_message()
    {'pattern': None, 'type': 'subscribe', 'channel': b'my-channel', 'data': 1}
    >>> r.publish('my-channel', 'awesome data')
    1
    # for the message handler to work, we need to tell the instance to read data.
    # this can be done in several ways (read more below). we'll just use
    # the familiar get_message() function for now
    >>> message = p.get_message()
    MY HANDLER: awesome data
    # note here that the my_handler callback printed the string above.
    # `message` is None because the message was handled by our handler.
    >>> print(message)
    None

If your application is not interested in the (sometimes noisy)
subscribe/unsubscribe confirmation messages, you can ignore them by passing
`ignore_subscribe_messages=True` to `r.pubsub()`. This will cause all
subscribe/unsubscribe messages to be read, but they won't bubble up to your
application.

.. code-block:: pycon

    >>> p = r.pubsub(ignore_subscribe_messages=True)
    >>> p.subscribe('my-channel')
    >>> p.get_message()  # hides the subscribe message and returns None
    >>> r.publish('my-channel', 'my data')
    1
    >>> p.get_message()
    {'channel': b'my-channel', 'data': b'my data', 'pattern': None, 'type': 'message'}

There are three different strategies for reading messages.

The examples above have been using `pubsub.get_message()`. Behind the scenes,
`get_message()` uses the system's 'select' module to quickly poll the
connection's socket. If there's data available to be read, `get_message()` will
read it, format the message and return it or pass it to a message handler. If
there's no data to be read, `get_message()` will immediately return None. This
makes it trivial to integrate into an existing event loop inside your
application.

.. code-block:: pycon

    >>> while True:
    >>>     message = p.get_message()
    >>>     if message:
    >>>         # do something with the message
    >>>     time.sleep(0.001)  # be nice to the system :)

Older versions of redis-py only read messages with `pubsub.listen()`. listen()
is a generator that blocks until a message is available. If your application
doesn't need to do anything else but receive and act on messages received from
redis, listen() is an easy way to get up and running.

.. code-block:: pycon

    >>> for message in p.listen():
    ...     # do something with the message

The third option runs an event loop in a separate thread.
`pubsub.run_in_thread()` creates a new thread and starts the event loop. The
thread object is returned to the caller of `run_in_thread()`. The caller can
use the `thread.stop()` method to shut down the event loop and thread. Behind
the scenes, this is simply a wrapper around `get_message()` that runs in a
separate thread, essentially creating a tiny non-blocking event loop for you.
`run_in_thread()` takes an optional `sleep_time` argument. If specified, the
event loop will call `time.sleep()` with the value in each iteration of the
loop.

Note: Since we're running in a separate thread, there's no way to handle
messages that aren't automatically handled with registered message handlers.
Therefore, redis-py prevents you from calling `run_in_thread()` if you're
subscribed to patterns or channels that don't have message handlers attached.

.. code-block:: pycon

    >>> p.subscribe(**{'my-channel': my_handler})
    >>> thread = p.run_in_thread(sleep_time=0.001)
    # the event loop is now running in the background processing messages
    # when it's time to shut it down...
    >>> thread.stop()

A PubSub object adheres to the same encoding semantics as the client instance
it was created from. Any channel or pattern that's unicode will be encoded
using the `charset` specified on the client before being sent to Redis. If the
client's `decode_responses` flag is set to False (the default), the
'channel', 'pattern' and 'data' values in message dictionaries will be byte
strings (str on Python 2, bytes on Python 3). If the client's
`decode_responses` is True, then the 'channel', 'pattern' and 'data' values
will be automatically decoded to unicode strings using the client's `charset`.
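
For example, a minimal sketch of the `decode_responses` behavior (the channel
name here is arbitrary):

.. code-block:: pycon

    >>> r = redis.Redis(decode_responses=True)
    >>> p = r.pubsub()
    >>> p.subscribe('my-channel')
    >>> p.get_message()
    {'pattern': None, 'type': 'subscribe', 'channel': 'my-channel', 'data': 1}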

PubSub objects remember what channels and patterns they are subscribed to. In
the event of a disconnection such as a network error or timeout, the
PubSub object will re-subscribe to all prior channels and patterns when
reconnecting. Messages that were published while the client was disconnected
cannot be delivered. When you're finished with a PubSub object, call its
`.close()` method to shut down the connection.

.. code-block:: pycon

    >>> p = r.pubsub()
    >>> ...
    >>> p.close()

The PUBSUB set of subcommands CHANNELS, NUMSUB and NUMPAT are also
supported:

.. code-block:: pycon

    >>> r.pubsub_channels()
    [b'foo', b'bar']
    >>> r.pubsub_numsub('foo', 'bar')
    [(b'foo', 9001), (b'bar', 42)]
    >>> r.pubsub_numsub('baz')
    [(b'baz', 0)]
    >>> r.pubsub_numpat()
    1204

Monitor
^^^^^^^
redis-py includes a `Monitor` object that streams every command processed
by the Redis server. Use `listen()` on the `Monitor` object to block
until a command is received.

.. code-block:: pycon

    >>> r = redis.Redis(...)
    >>> with r.monitor() as m:
    >>>     for command in m.listen():
    >>>         print(command)

Lua Scripting
^^^^^^^^^^^^^

redis-py supports the EVAL, EVALSHA, and SCRIPT commands. However, there are
a number of edge cases that make these commands tedious to use in real world
scenarios. Therefore, redis-py exposes a Script object that makes scripting
much easier to use.

To create a Script instance, use the `register_script` function on a client
instance passing the Lua code as the first argument. `register_script` returns
a Script instance that you can use throughout your code.

The following trivial Lua script accepts two parameters: the name of a key and
a multiplier value. The script fetches the value stored in the key, multiplies
it with the multiplier value and returns the result.

.. code-block:: pycon

    >>> r = redis.Redis()
    >>> lua = """
    ... local value = redis.call('GET', KEYS[1])
    ... value = tonumber(value)
    ... return value * ARGV[1]"""
    >>> multiply = r.register_script(lua)

`multiply` is now a Script instance that is invoked by calling it like a
function. Script instances accept the following optional arguments:

* **keys**: A list of key names that the script will access. This becomes the
  KEYS list in Lua.
* **args**: A list of argument values. This becomes the ARGV list in Lua.
* **client**: A redis-py Client or Pipeline instance that will invoke the
  script. If client isn't specified, the client that initially
  created the Script instance (the one that `register_script` was
  invoked from) will be used.

Continuing the example from above:

.. code-block:: pycon

    >>> r.set('foo', 2)
    >>> multiply(keys=['foo'], args=[5])
    10

The value of key 'foo' is set to 2. When multiply is invoked, the 'foo' key is
passed to the script along with the multiplier value of 5. Lua executes the
script and returns the result, 10.

Script instances can be executed using a different client instance, even one
that points to a completely different Redis server.

.. code-block:: pycon

    >>> r2 = redis.Redis('redis2.example.com')
    >>> r2.set('foo', 3)
    >>> multiply(keys=['foo'], args=[5], client=r2)
    15

The Script object ensures that the Lua script is loaded into Redis's script
cache. In the event of a NOSCRIPT error, it will load the script and retry
executing it.
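
A minimal sketch of that retry behavior, reusing the `multiply` script from
above (the script cache is flushed here only to demonstrate the reload):

.. code-block:: pycon

    >>> r.script_flush()  # empty the server's script cache
    >>> multiply(keys=['foo'], args=[5])  # reloaded and retried transparently
    10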

Script objects can also be used in pipelines. The pipeline instance should be
passed as the client argument when calling the script. Care is taken to ensure
that the script is registered in Redis's script cache just prior to pipeline
execution.

.. code-block:: pycon

    >>> pipe = r.pipeline()
    >>> pipe.set('foo', 5)
    >>> multiply(keys=['foo'], args=[5], client=pipe)
    >>> pipe.execute()
    [True, 25]

Sentinel support
^^^^^^^^^^^^^^^^

redis-py can be used together with `Redis Sentinel <https://redis.io/topics/sentinel>`_
to discover Redis nodes. You need to have at least one Sentinel daemon running
in order to use redis-py's Sentinel support.

Connecting redis-py to the Sentinel instance(s) is easy. You can use a
Sentinel connection to discover the master's and slaves' network addresses:

.. code-block:: pycon

    >>> from redis.sentinel import Sentinel
    >>> sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.1)
    >>> sentinel.discover_master('mymaster')
    ('127.0.0.1', 6379)
    >>> sentinel.discover_slaves('mymaster')
    [('127.0.0.1', 6380)]

You can also create Redis client connections from a Sentinel instance. You can
connect to either the master (for write operations) or a slave (for read-only
operations).

.. code-block:: pycon

    >>> master = sentinel.master_for('mymaster', socket_timeout=0.1)
    >>> slave = sentinel.slave_for('mymaster', socket_timeout=0.1)
    >>> master.set('foo', 'bar')
    >>> slave.get('foo')
    b'bar'

The master and slave objects are normal Redis instances with their
connection pool bound to the Sentinel instance. When a Sentinel backed client
attempts to establish a connection, it first queries the Sentinel servers to
determine an appropriate host to connect to. If no server is found,
a MasterNotFoundError or SlaveNotFoundError is raised. Both exceptions are
subclasses of ConnectionError.
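
A minimal sketch of handling those errors (the service name below is
deliberately unknown):

.. code-block:: pycon

    >>> from redis.sentinel import MasterNotFoundError
    >>> try:
    ...     sentinel.discover_master('no-such-service')
    ... except MasterNotFoundError:
    ...     print('sentinel does not know that service')
    sentinel does not know that service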

When trying to connect to a slave client, the Sentinel connection pool will
iterate over the list of slaves until it finds one that can be connected to.
If no slaves can be connected to, a connection will be established with the
master.

See `Guidelines for Redis clients with support for Redis Sentinel
<https://redis.io/topics/sentinel-clients>`_ to learn more about Redis Sentinel.

Scan Iterators
^^^^^^^^^^^^^^

The \*SCAN commands introduced in Redis 2.8 can be cumbersome to use. While
these commands are fully supported, redis-py also exposes the following methods
that return Python iterators for convenience: `scan_iter`, `hscan_iter`,
`sscan_iter` and `zscan_iter`.

.. code-block:: pycon

    >>> for key, value in (('A', '1'), ('B', '2'), ('C', '3')):
    ...     r.set(key, value)
    >>> for key in r.scan_iter():
    ...     print(key, r.get(key))
    A 1
    B 2
    C 3
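
The hash, set and sorted-set variants work the same way. A minimal sketch for
`hscan_iter`, using a hypothetical hash key:

.. code-block:: pycon

    >>> r.hset('my-hash', 'field1', 'value1')
    1
    >>> for field, value in r.hscan_iter('my-hash'):
    ...     print(field, value)
    b'field1' b'value1'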

Author
^^^^^^

redis-py is developed and maintained by Andy McCurdy (sedrik@gmail.com).
It can be found here: https://github.com/andymccurdy/redis-py

Special thanks to:

* Ludovico Magnocavallo, author of the original Python Redis client, from
  which some of the socket code is still used.
* Alexander Solovyov for ideas on the generic response callback system.
* Paul Hubbard for initial packaging support.

@ -0,0 +1,22 @@
redis-3.5.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
redis-3.5.2.dist-info/LICENSE,sha256=eQFI2MEvijiycHp0viNDMWutEmmV_1SAGhgbiyMboSQ,1074
redis-3.5.2.dist-info/METADATA,sha256=1S43bhBSoRk6JkBbRF2FaUDzr48m32yHicEy6hrjZLw,36674
redis-3.5.2.dist-info/RECORD,,
redis-3.5.2.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
redis-3.5.2.dist-info/top_level.txt,sha256=OMAefszlde6ZoOtlM35AWzpRIrwtcqAMHGlRit-w2-4,6
redis/__init__.py,sha256=U3eh1OAZ87NT6pppHLMWmApe8_2YoOMj7sy1N8m3dT4,1209
redis/__pycache__/__init__.cpython-37.pyc,,
redis/__pycache__/_compat.cpython-37.pyc,,
redis/__pycache__/client.cpython-37.pyc,,
redis/__pycache__/connection.cpython-37.pyc,,
redis/__pycache__/exceptions.cpython-37.pyc,,
redis/__pycache__/lock.cpython-37.pyc,,
redis/__pycache__/sentinel.cpython-37.pyc,,
redis/__pycache__/utils.cpython-37.pyc,,
redis/_compat.py,sha256=opM78DdCy4D86p9cpN_O81yNgjVDUwOJGLtMS4LL9-0,5698
redis/client.py,sha256=O5zjv95LO7_TnsPfaTHXNvqNiMqzs_1wD59CI7UBeHk,159479
redis/connection.py,sha256=MXU__pk5cWt5OAaLZzoTBezHtq2SBksT97HIK3iCb4U,54481
redis/exceptions.py,sha256=phjjyJjnebrM82XDzfjtreGnkWIoSNfDZiyoWs3_zQE,1341
redis/lock.py,sha256=VNfWNN46FBwhcPUnFmzC8N8uLuxCsu2YT2drkEzM6_U,11349
redis/sentinel.py,sha256=IKzrrtgzbjVvI7r50DwKW3pK_yoNIBkLiKskYsOm5_M,11359
redis/utils.py,sha256=wG1Ws79_HgIzAALwYwK4CrVLLloVTRPRqjo1gxF4U7U,674
@ -0,0 +1,6 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.34.2)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any

@ -0,0 +1 @@
redis
59
infrastructure/lambda/task_queue_manager/redis/__init__.py
Normal file
@ -0,0 +1,59 @@
from redis.client import Redis, StrictRedis
from redis.connection import (
    BlockingConnectionPool,
    ConnectionPool,
    Connection,
    SSLConnection,
    UnixDomainSocketConnection
)
from redis.utils import from_url
from redis.exceptions import (
    AuthenticationError,
    AuthenticationWrongNumberOfArgsError,
    BusyLoadingError,
    ChildDeadlockedError,
    ConnectionError,
    DataError,
    InvalidResponse,
    PubSubError,
    ReadOnlyError,
    RedisError,
    ResponseError,
    TimeoutError,
    WatchError
)


def int_or_str(value):
    try:
        return int(value)
    except ValueError:
        return value


__version__ = '3.5.2'
VERSION = tuple(map(int_or_str, __version__.split('.')))

__all__ = [
    'AuthenticationError',
    'AuthenticationWrongNumberOfArgsError',
    'BlockingConnectionPool',
    'BusyLoadingError',
    'ChildDeadlockedError',
    'Connection',
    'ConnectionError',
    'ConnectionPool',
    'DataError',
    'from_url',
    'InvalidResponse',
    'PubSubError',
    'ReadOnlyError',
    'Redis',
    'RedisError',
    'ResponseError',
    'SSLConnection',
    'StrictRedis',
    'TimeoutError',
    'UnixDomainSocketConnection',
    'WatchError',
]
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
188
infrastructure/lambda/task_queue_manager/redis/_compat.py
Normal file
@ -0,0 +1,188 @@
"""Internal module for Python 2 backwards compatibility."""
# flake8: noqa
import errno
import socket
import sys


def sendall(sock, *args, **kwargs):
    return sock.sendall(*args, **kwargs)


def shutdown(sock, *args, **kwargs):
    return sock.shutdown(*args, **kwargs)


def ssl_wrap_socket(context, sock, *args, **kwargs):
    return context.wrap_socket(sock, *args, **kwargs)


# For Python older than 3.5, retry EINTR.
if sys.version_info[0] < 3 or (sys.version_info[0] == 3 and
                               sys.version_info[1] < 5):
    # Adapted from https://bugs.python.org/review/23863/patch/14532/54418
    import time

    # Wrapper for handling interruptible system calls.
    def _retryable_call(s, func, *args, **kwargs):
        # Some modules (SSL) use the _fileobject wrapper directly and
        # implement a smaller portion of the socket interface, thus we
        # need to let them continue to do so.
        timeout, deadline = None, 0.0
        attempted = False
        try:
            timeout = s.gettimeout()
        except AttributeError:
            pass

        if timeout:
            deadline = time.time() + timeout

        try:
            while True:
                if attempted and timeout:
                    now = time.time()
                    if now >= deadline:
                        raise socket.error(errno.EWOULDBLOCK, "timed out")
                    else:
                        # Overwrite the timeout on the socket object
                        # to take into account elapsed time.
                        s.settimeout(deadline - now)
                try:
                    attempted = True
                    return func(*args, **kwargs)
                except socket.error as e:
                    if e.args[0] == errno.EINTR:
                        continue
                    raise
        finally:
            # Set the existing timeout back for future
            # calls.
            if timeout:
                s.settimeout(timeout)

    def recv(sock, *args, **kwargs):
        return _retryable_call(sock, sock.recv, *args, **kwargs)

    def recv_into(sock, *args, **kwargs):
        return _retryable_call(sock, sock.recv_into, *args, **kwargs)

else:  # Python 3.5 and above automatically retry EINTR
    def recv(sock, *args, **kwargs):
        return sock.recv(*args, **kwargs)

    def recv_into(sock, *args, **kwargs):
        return sock.recv_into(*args, **kwargs)

if sys.version_info[0] < 3:
    # In Python 3, the ssl module raises socket.timeout whereas it raises
    # SSLError in Python 2. For compatibility between versions, ensure
    # socket.timeout is raised for both.
    import functools

    try:
        from ssl import SSLError as _SSLError
    except ImportError:
        class _SSLError(Exception):
            """A replacement in case ssl.SSLError is not available."""
            pass

    _EXPECTED_SSL_TIMEOUT_MESSAGES = (
        "The handshake operation timed out",
        "The read operation timed out",
        "The write operation timed out",
    )

    def _handle_ssl_timeout(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except _SSLError as e:
                message = len(e.args) == 1 and unicode(e.args[0]) or ''
                if any(x in message for x in _EXPECTED_SSL_TIMEOUT_MESSAGES):
                    # Raise socket.timeout for compatibility with Python 3.
                    raise socket.timeout(*e.args)
                raise
        return wrapper

    recv = _handle_ssl_timeout(recv)
    recv_into = _handle_ssl_timeout(recv_into)
    sendall = _handle_ssl_timeout(sendall)
    shutdown = _handle_ssl_timeout(shutdown)
    ssl_wrap_socket = _handle_ssl_timeout(ssl_wrap_socket)

if sys.version_info[0] < 3:
    from urllib import unquote
    from urlparse import parse_qs, urlparse
    from itertools import imap, izip
    from string import letters as ascii_letters
    from Queue import Queue

    # special unicode handling for python2 to avoid UnicodeDecodeError
    def safe_unicode(obj, *args):
        """ return the unicode representation of obj """
        try:
            return unicode(obj, *args)
        except UnicodeDecodeError:
            # obj is byte string
            ascii_text = str(obj).encode('string_escape')
            return unicode(ascii_text)

    def iteritems(x):
        return x.iteritems()

    def iterkeys(x):
        return x.iterkeys()

    def itervalues(x):
        return x.itervalues()

    def nativestr(x):
        return x if isinstance(x, str) else x.encode('utf-8', 'replace')

    def next(x):
        return x.next()

    unichr = unichr
    xrange = xrange
    basestring = basestring
    unicode = unicode
    long = long
    BlockingIOError = socket.error
else:
    from urllib.parse import parse_qs, unquote, urlparse
    from string import ascii_letters
    from queue import Queue

    def iteritems(x):
        return iter(x.items())

    def iterkeys(x):
        return iter(x.keys())

    def itervalues(x):
        return iter(x.values())

    def nativestr(x):
        return x if isinstance(x, str) else x.decode('utf-8', 'replace')

    def safe_unicode(value):
        if isinstance(value, bytes):
            value = value.decode('utf-8', 'replace')
        return str(value)

    next = next
    unichr = chr
    imap = map
    izip = zip
    xrange = range
    basestring = str
    unicode = str
    long = int
    BlockingIOError = BlockingIOError

try:  # Python 3
    from queue import LifoQueue, Empty, Full
except ImportError:  # Python 2
    from Queue import LifoQueue, Empty, Full
4170
infrastructure/lambda/task_queue_manager/redis/client.py
Normal file
File diff suppressed because it is too large
1384
infrastructure/lambda/task_queue_manager/redis/connection.py
Normal file
File diff suppressed because it is too large
82
infrastructure/lambda/task_queue_manager/redis/exceptions.py
Normal file
@ -0,0 +1,82 @@
"Core exceptions raised by the Redis client"


class RedisError(Exception):
    pass


class ConnectionError(RedisError):
    pass


class TimeoutError(RedisError):
    pass


class AuthenticationError(ConnectionError):
    pass


class BusyLoadingError(ConnectionError):
    pass


class InvalidResponse(RedisError):
    pass


class ResponseError(RedisError):
    pass


class DataError(RedisError):
    pass


class PubSubError(RedisError):
    pass


class WatchError(RedisError):
    pass


class NoScriptError(ResponseError):
    pass


class ExecAbortError(ResponseError):
    pass


class ReadOnlyError(ResponseError):
    pass


class NoPermissionError(ResponseError):
    pass


class LockError(RedisError, ValueError):
    "Errors acquiring or releasing a lock"
    # NOTE: For backwards compatibility, this class derives from ValueError.
    # This was originally chosen to behave like threading.Lock.
    pass


class LockNotOwnedError(LockError):
    "Error trying to extend or release a lock that is (no longer) owned"
    pass


class ChildDeadlockedError(Exception):
    "Error indicating that a child process is deadlocked after a fork()"
    pass


class AuthenticationWrongNumberOfArgsError(ResponseError):
    """
    An error to indicate that the wrong number of args
    were sent to the AUTH command
    """
    pass
293
infrastructure/lambda/task_queue_manager/redis/lock.py
Normal file
@ -0,0 +1,293 @@
import threading
import time as mod_time
import uuid
from redis.exceptions import LockError, LockNotOwnedError
from redis.utils import dummy


class Lock(object):
    """
    A shared, distributed Lock. Using Redis for locking allows the Lock
    to be shared across processes and/or machines.

    It's left to the user to resolve deadlock issues and make sure
    multiple clients play nicely together.
    """

    lua_release = None
    lua_extend = None
    lua_reacquire = None

    # KEYS[1] - lock name
    # ARGV[1] - token
    # return 1 if the lock was released, otherwise 0
    LUA_RELEASE_SCRIPT = """
        local token = redis.call('get', KEYS[1])
        if not token or token ~= ARGV[1] then
            return 0
        end
        redis.call('del', KEYS[1])
        return 1
    """

    # KEYS[1] - lock name
    # ARGV[1] - token
    # ARGV[2] - additional milliseconds
    # ARGV[3] - "0" if the additional time should be added to the lock's
    #           existing ttl or "1" if the existing ttl should be replaced
    # return 1 if the lock's time was extended, otherwise 0
    LUA_EXTEND_SCRIPT = """
        local token = redis.call('get', KEYS[1])
        if not token or token ~= ARGV[1] then
            return 0
        end
        local expiration = redis.call('pttl', KEYS[1])
        if not expiration then
            expiration = 0
        end
        if expiration < 0 then
            return 0
        end

        local newttl = ARGV[2]
        if ARGV[3] == "0" then
            newttl = ARGV[2] + expiration
        end
        redis.call('pexpire', KEYS[1], newttl)
        return 1
    """

    # KEYS[1] - lock name
    # ARGV[1] - token
    # ARGV[2] - milliseconds
    # return 1 if the lock's time was reacquired, otherwise 0
    LUA_REACQUIRE_SCRIPT = """
        local token = redis.call('get', KEYS[1])
        if not token or token ~= ARGV[1] then
            return 0
        end
        redis.call('pexpire', KEYS[1], ARGV[2])
        return 1
    """

    def __init__(self, redis, name, timeout=None, sleep=0.1,
                 blocking=True, blocking_timeout=None, thread_local=True):
        """
        Create a new Lock instance named ``name`` using the Redis client
        supplied by ``redis``.

        ``timeout`` indicates a maximum life for the lock.
        By default, it will remain locked until release() is called.
        ``timeout`` can be specified as a float or integer, both representing
        the number of seconds to wait.

        ``sleep`` indicates the amount of time to sleep per loop iteration
        when the lock is in blocking mode and another client is currently
        holding the lock.

        ``blocking`` indicates whether calling ``acquire`` should block until
        the lock has been acquired or fail immediately, causing ``acquire``
        to return False without the lock being acquired. Defaults to True.
        Note this value can be overridden by passing a ``blocking``
        argument to ``acquire``.

        ``blocking_timeout`` indicates the maximum amount of time in seconds to
        spend trying to acquire the lock. A value of ``None`` indicates
        continue trying forever. ``blocking_timeout`` can be specified as a
        float or integer, both representing the number of seconds to wait.

        ``thread_local`` indicates whether the lock token is placed in
        thread-local storage. By default, the token is placed in thread local
        storage so that a thread only sees its token, not a token set by
        another thread. Consider the following timeline:

            time: 0, thread-1 acquires `my-lock`, with a timeout of 5 seconds.
                     thread-1 sets the token to "abc"
            time: 1, thread-2 blocks trying to acquire `my-lock` using the
                     Lock instance.
            time: 5, thread-1 has not yet completed. redis expires the lock
                     key.
            time: 5, thread-2 acquired `my-lock` now that it's available.
                     thread-2 sets the token to "xyz"
            time: 6, thread-1 finishes its work and calls release(). if the
                     token is *not* stored in thread local storage, then
                     thread-1 would see the token value as "xyz" and would be
                     able to successfully release thread-2's lock.

        In some use cases it's necessary to disable thread local storage. For
        example, if you have code where one thread acquires a lock and passes
        that lock instance to a worker thread to release later. If thread
        local storage isn't disabled in this case, the worker thread won't see
        the token set by the thread that acquired the lock. Our assumption
        is that these cases aren't common and as such default to using
        thread local storage.
        """
        self.redis = redis
        self.name = name
        self.timeout = timeout
        self.sleep = sleep
        self.blocking = blocking
        self.blocking_timeout = blocking_timeout
        self.thread_local = bool(thread_local)
        self.local = threading.local() if self.thread_local else dummy()
        self.local.token = None
        self.register_scripts()

    def register_scripts(self):
        cls = self.__class__
        client = self.redis
        if cls.lua_release is None:
            cls.lua_release = client.register_script(cls.LUA_RELEASE_SCRIPT)
        if cls.lua_extend is None:
            cls.lua_extend = client.register_script(cls.LUA_EXTEND_SCRIPT)
        if cls.lua_reacquire is None:
            cls.lua_reacquire = \
                client.register_script(cls.LUA_REACQUIRE_SCRIPT)

    def __enter__(self):
        # force blocking, as otherwise the user would have to check whether
        # the lock was actually acquired or not.
        if self.acquire(blocking=True):
            return self
        raise LockError("Unable to acquire lock within the time specified")

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()

    def acquire(self, blocking=None, blocking_timeout=None, token=None):
        """
        Use Redis to hold a shared, distributed lock named ``name``.
        Returns True once the lock is acquired.

        If ``blocking`` is False, always return immediately. If the lock
        was acquired, return True, otherwise return False.

        ``blocking_timeout`` specifies the maximum number of seconds to
        wait trying to acquire the lock.

        ``token`` specifies the token value to be used. If provided, token
        must be a bytes object or a string that can be encoded to a bytes
        object with the default encoding. If a token isn't specified, a UUID
        will be generated.
        """
        sleep = self.sleep
        if token is None:
            token = uuid.uuid1().hex.encode()
        else:
            encoder = self.redis.connection_pool.get_encoder()
            token = encoder.encode(token)
        if blocking is None:
            blocking = self.blocking
        if blocking_timeout is None:
            blocking_timeout = self.blocking_timeout
        stop_trying_at = None
        if blocking_timeout is not None:
            stop_trying_at = mod_time.time() + blocking_timeout
        while True:
            if self.do_acquire(token):
                self.local.token = token
                return True
            if not blocking:
                return False
            next_try_at = mod_time.time() + sleep
            if stop_trying_at is not None and next_try_at > stop_trying_at:
                return False
            mod_time.sleep(sleep)

    def do_acquire(self, token):
        if self.timeout:
            # convert to milliseconds
            timeout = int(self.timeout * 1000)
        else:
            timeout = None
        if self.redis.set(self.name, token, nx=True, px=timeout):
            return True
        return False

    def locked(self):
        """
        Returns True if this key is locked by any process, otherwise False.
        """
        return self.redis.get(self.name) is not None

    def owned(self):
        """
        Returns True if this key is locked by this lock, otherwise False.
        """
        stored_token = self.redis.get(self.name)
        # need to always compare bytes to bytes
        # TODO: this can be simplified when the context manager is finished
        if stored_token and not isinstance(stored_token, bytes):
            encoder = self.redis.connection_pool.get_encoder()
            stored_token = encoder.encode(stored_token)
        return self.local.token is not None and \
            stored_token == self.local.token

    def release(self):
        "Releases the already acquired lock"
        expected_token = self.local.token
        if expected_token is None:
            raise LockError("Cannot release an unlocked lock")
        self.local.token = None
        self.do_release(expected_token)

    def do_release(self, expected_token):
        if not bool(self.lua_release(keys=[self.name],
                                     args=[expected_token],
                                     client=self.redis)):
            raise LockNotOwnedError("Cannot release a lock"
                                    " that's no longer owned")

    def extend(self, additional_time, replace_ttl=False):
        """
        Adds more time to an already acquired lock.

        ``additional_time`` can be specified as an integer or a float, both
        representing the number of seconds to add.

        ``replace_ttl`` if False (the default), add `additional_time` to
        the lock's existing ttl. If True, replace the lock's ttl with
        `additional_time`.
        """
        if self.local.token is None:
            raise LockError("Cannot extend an unlocked lock")
        if self.timeout is None:
            raise LockError("Cannot extend a lock with no timeout")
        return self.do_extend(additional_time, replace_ttl)

    def do_extend(self, additional_time, replace_ttl):
        additional_time = int(additional_time * 1000)
        if not bool(
            self.lua_extend(
                keys=[self.name],
                args=[
                    self.local.token,
                    additional_time,
                    replace_ttl and "1" or "0"
                ],
                client=self.redis,
            )
        ):
            raise LockNotOwnedError(
                "Cannot extend a lock that's no longer owned"
            )
        return True

    def reacquire(self):
        """
        Resets a TTL of an already acquired lock back to a timeout value.
        """
        if self.local.token is None:
            raise LockError("Cannot reacquire an unlocked lock")
        if self.timeout is None:
            raise LockError("Cannot reacquire a lock with no timeout")
        return self.do_reacquire()

    def do_reacquire(self):
        timeout = int(self.timeout * 1000)
        if not bool(self.lua_reacquire(keys=[self.name],
                                       args=[self.local.token, timeout],
                                       client=self.redis)):
            raise LockNotOwnedError("Cannot reacquire a lock that's"
                                    " no longer owned")
        return True
286
infrastructure/lambda/task_queue_manager/redis/sentinel.py
Normal file
@ -0,0 +1,286 @@
import random
import weakref

from redis.client import Redis
from redis.connection import ConnectionPool, Connection
from redis.exceptions import (ConnectionError, ResponseError, ReadOnlyError,
                              TimeoutError)
from redis._compat import iteritems, nativestr, xrange


class MasterNotFoundError(ConnectionError):
    pass


class SlaveNotFoundError(ConnectionError):
    pass


class SentinelManagedConnection(Connection):
    def __init__(self, **kwargs):
        self.connection_pool = kwargs.pop('connection_pool')
        super(SentinelManagedConnection, self).__init__(**kwargs)

    def __repr__(self):
        pool = self.connection_pool
        s = '%s<service=%s%%s>' % (type(self).__name__, pool.service_name)
        if self.host:
            host_info = ',host=%s,port=%s' % (self.host, self.port)
            s = s % host_info
        return s

    def connect_to(self, address):
        self.host, self.port = address
        super(SentinelManagedConnection, self).connect()
        if self.connection_pool.check_connection:
            self.send_command('PING')
            if nativestr(self.read_response()) != 'PONG':
                raise ConnectionError('PING failed')

    def connect(self):
        if self._sock:
            return  # already connected
        if self.connection_pool.is_master:
            self.connect_to(self.connection_pool.get_master_address())
        else:
            for slave in self.connection_pool.rotate_slaves():
                try:
                    return self.connect_to(slave)
                except ConnectionError:
                    continue
            raise SlaveNotFoundError  # never reached

    def read_response(self):
        try:
            return super(SentinelManagedConnection, self).read_response()
        except ReadOnlyError:
            if self.connection_pool.is_master:
                # When talking to a master, a ReadOnlyError most likely
                # indicates that the previous master that we're still connected
                # to has been demoted to a slave and there's a new master.
                # calling disconnect will force the connection to re-query
                # sentinel during the next connect() attempt.
                self.disconnect()
                raise ConnectionError('The previous master is now a slave')
            raise


class SentinelConnectionPool(ConnectionPool):
    """
    Sentinel backed connection pool.

    If ``check_connection`` flag is set to True, SentinelManagedConnection
    sends a PING command right after establishing the connection.
    """

    def __init__(self, service_name, sentinel_manager, **kwargs):
        kwargs['connection_class'] = kwargs.get(
            'connection_class', SentinelManagedConnection)
        self.is_master = kwargs.pop('is_master', True)
        self.check_connection = kwargs.pop('check_connection', False)
        super(SentinelConnectionPool, self).__init__(**kwargs)
        self.connection_kwargs['connection_pool'] = weakref.proxy(self)
        self.service_name = service_name
        self.sentinel_manager = sentinel_manager

    def __repr__(self):
        return "%s<service=%s(%s)" % (
            type(self).__name__,
            self.service_name,
            self.is_master and 'master' or 'slave',
        )

    def reset(self):
        super(SentinelConnectionPool, self).reset()
        self.master_address = None
        self.slave_rr_counter = None

    def get_master_address(self):
        master_address = self.sentinel_manager.discover_master(
            self.service_name)
        if self.is_master:
            if self.master_address is None:
                self.master_address = master_address
            elif master_address != self.master_address:
                # Master address changed, disconnect all clients in this pool
                self.disconnect()
        return master_address

    def rotate_slaves(self):
        "Round-robin slave balancer"
        slaves = self.sentinel_manager.discover_slaves(self.service_name)
        if slaves:
            if self.slave_rr_counter is None:
                self.slave_rr_counter = random.randint(0, len(slaves) - 1)
            for _ in xrange(len(slaves)):
                self.slave_rr_counter = (
                    self.slave_rr_counter + 1) % len(slaves)
                slave = slaves[self.slave_rr_counter]
                yield slave
        # Fallback to the master connection
        try:
            yield self.get_master_address()
        except MasterNotFoundError:
            pass
        raise SlaveNotFoundError('No slave found for %r' % (self.service_name))


class Sentinel(object):
    """
    Redis Sentinel cluster client

    >>> from redis.sentinel import Sentinel
    >>> sentinel = Sentinel([('localhost', 26379)], socket_timeout=0.1)
    >>> master = sentinel.master_for('mymaster', socket_timeout=0.1)
    >>> master.set('foo', 'bar')
    >>> slave = sentinel.slave_for('mymaster', socket_timeout=0.1)
    >>> slave.get('foo')
    b'bar'

    ``sentinels`` is a list of sentinel nodes. Each node is represented by
    a pair (hostname, port).

    ``min_other_sentinels`` defines a minimum number of peers for a sentinel.
    When querying a sentinel, if it doesn't meet this threshold, responses
    from that sentinel won't be considered valid.

    ``sentinel_kwargs`` is a dictionary of connection arguments used when
    connecting to sentinel instances. Any argument that can be passed to
    a normal Redis connection can be specified here. If ``sentinel_kwargs`` is
    not specified, any socket_timeout and socket_keepalive options specified
    in ``connection_kwargs`` will be used.

    ``connection_kwargs`` are keyword arguments that will be used when
    establishing a connection to a Redis server.
    """

    def __init__(self, sentinels, min_other_sentinels=0, sentinel_kwargs=None,
                 **connection_kwargs):
        # if sentinel_kwargs isn't defined, use the socket_* options from
        # connection_kwargs
        if sentinel_kwargs is None:
            sentinel_kwargs = {
                k: v
                for k, v in iteritems(connection_kwargs)
                if k.startswith('socket_')
            }
        self.sentinel_kwargs = sentinel_kwargs

        self.sentinels = [Redis(hostname, port, **self.sentinel_kwargs)
                          for hostname, port in sentinels]
        self.min_other_sentinels = min_other_sentinels
        self.connection_kwargs = connection_kwargs

    def __repr__(self):
        sentinel_addresses = []
        for sentinel in self.sentinels:
            sentinel_addresses.append('%s:%s' % (
                sentinel.connection_pool.connection_kwargs['host'],
                sentinel.connection_pool.connection_kwargs['port'],
            ))
        return '%s<sentinels=[%s]>' % (
            type(self).__name__,
            ','.join(sentinel_addresses))

    def check_master_state(self, state, service_name):
        if not state['is_master'] or state['is_sdown'] or state['is_odown']:
            return False
        # Check if our sentinel doesn't see other nodes
        if state['num-other-sentinels'] < self.min_other_sentinels:
            return False
        return True

    def discover_master(self, service_name):
        """
        Asks sentinel servers for the Redis master's address corresponding
        to the service labeled ``service_name``.

        Returns a pair (address, port) or raises MasterNotFoundError if no
        master is found.
        """
        for sentinel_no, sentinel in enumerate(self.sentinels):
            try:
                masters = sentinel.sentinel_masters()
            except (ConnectionError, TimeoutError):
                continue
            state = masters.get(service_name)
            if state and self.check_master_state(state, service_name):
                # Put this sentinel at the top of the list
                self.sentinels[0], self.sentinels[sentinel_no] = (
                    sentinel, self.sentinels[0])
                return state['ip'], state['port']
        raise MasterNotFoundError("No master found for %r" % (service_name,))

    def filter_slaves(self, slaves):
        "Remove slaves that are in an ODOWN or SDOWN state"
        slaves_alive = []
        for slave in slaves:
            if slave['is_odown'] or slave['is_sdown']:
                continue
            slaves_alive.append((slave['ip'], slave['port']))
        return slaves_alive

    def discover_slaves(self, service_name):
        "Returns a list of alive slaves for service ``service_name``"
        for sentinel in self.sentinels:
            try:
                slaves = sentinel.sentinel_slaves(service_name)
            except (ConnectionError, ResponseError, TimeoutError):
                continue
            slaves = self.filter_slaves(slaves)
            if slaves:
                return slaves
        return []

    def master_for(self, service_name, redis_class=Redis,
                   connection_pool_class=SentinelConnectionPool, **kwargs):
        """
        Returns a redis client instance for the ``service_name`` master.

        A SentinelConnectionPool class is used to retrieve the master's
        address before establishing a new connection.

        NOTE: If the master's address has changed, any cached connections to
        the old master are closed.

        By default clients will be a redis.Redis instance. Specify a
        different class to the ``redis_class`` argument if you desire
        something different.

        The ``connection_pool_class`` specifies the connection pool to use.
        The SentinelConnectionPool will be used by default.

        All other keyword arguments are merged with any connection_kwargs
        passed to this class and passed to the connection pool as keyword
        arguments to be used to initialize Redis connections.
        """
        kwargs['is_master'] = True
        connection_kwargs = dict(self.connection_kwargs)
        connection_kwargs.update(kwargs)
        return redis_class(connection_pool=connection_pool_class(
            service_name, self, **connection_kwargs))

    def slave_for(self, service_name, redis_class=Redis,
                  connection_pool_class=SentinelConnectionPool, **kwargs):
        """
        Returns redis client instance for the ``service_name`` slave(s).

        A SentinelConnectionPool class is used to retrieve the slave's
        address before establishing a new connection.

        By default clients will be a redis.Redis instance. Specify a
        different class to the ``redis_class`` argument if you desire
        something different.

        The ``connection_pool_class`` specifies the connection pool to use.
        The SentinelConnectionPool will be used by default.

        All other keyword arguments are merged with any connection_kwargs
        passed to this class and passed to the connection pool as keyword
        arguments to be used to initialize Redis connections.
        """
        kwargs['is_master'] = False
        connection_kwargs = dict(self.connection_kwargs)
        connection_kwargs.update(kwargs)
        return redis_class(connection_pool=connection_pool_class(
            service_name, self, **connection_kwargs))
33
infrastructure/lambda/task_queue_manager/redis/utils.py
Normal file
@ -0,0 +1,33 @@
from contextlib import contextmanager


try:
    import hiredis  # noqa
    HIREDIS_AVAILABLE = True
except ImportError:
    HIREDIS_AVAILABLE = False


def from_url(url, db=None, **kwargs):
    """
    Returns an active Redis client generated from the given database URL.

    Will attempt to extract the database id from the path url fragment, if
    none is provided.
    """
    from redis.client import Redis
    return Redis.from_url(url, db, **kwargs)


@contextmanager
def pipeline(redis_obj):
    p = redis_obj.pipeline()
    yield p
    p.execute()


class dummy(object):
    """
    Instances of this class can be used as an attribute container.
    """
    pass
@ -0,0 +1 @@
redis