diff --git a/.gitignore b/.gitignore
index 603f23c..f5aea4c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,3 @@
 *.pyc
 nohup.out
+*.rdb
diff --git a/lib/redis b/lib/redis
new file mode 120000
index 0000000..e66eec1
--- /dev/null
+++ b/lib/redis
@@ -0,0 +1 @@
+../src/andymccurdy-redis-py-7112f5b/redis/
\ No newline at end of file
diff --git a/muxlist/mix/urls.py b/muxlist/mix/urls.py
index f121dde..6ea642d 100644
--- a/muxlist/mix/urls.py
+++ b/muxlist/mix/urls.py
@@ -2,6 +2,7 @@
 urlpatterns = patterns('muxlist.mix.views',
     (r'^(?P<group_name>.*)/add/$', 'add_message'),
-    (r'^(?P<group_name>.*)/song/$', 'add_song'),
+    (r'^(?P<group_name>.*)/next/$', 'next_song'),
+    (r'^(?P<group_name>.*)/current/$', 'current_song'),
     (r'^(?P<group_name>.*)/$', 'index'),
 )
diff --git a/muxlist/mix/views.py b/muxlist/mix/views.py
index d1ff626..3289d81 100644
--- a/muxlist/mix/views.py
+++ b/muxlist/mix/views.py
@@ -1,4 +1,4 @@
-from django.http import HttpResponse, HttpResponseRedirect
+from django.http import HttpResponse, HttpResponseRedirect, Http404
 from django.shortcuts import render_to_response, get_object_or_404
 from muxlist.mix.models import Group
 from muxlist.music.forms import UploadForm
@@ -9,6 +9,104 @@
 import stomp
 import json
 import time
+import redis
+
+from django.contrib.auth.models import User
+
+from muxlist.music.signals import track_uploaded
+from django.dispatch import receiver
+
+def debug_enqueue(group_name, user, track):
+    conn = stomp.Connection()
+    conn.start()
+    conn.connect()
+    conn.subscribe(destination='/mix/%s' % group_name, ack='auto')
+    msg = json.dumps({'type': 'debug', 'msg': '%s enqueued %s' % (user, track)})
+    conn.send(msg, destination='/mix/%s' % group_name)
+
+def send_current_song(group_name, user):
+    print "send current song to %s" % user
+    r = redis.Redis(host='localhost', port=6379, db=0)
+
+    track = Track.objects.get(id=r.get('%s_current' % group_name))
+    playing_user = User.objects.get(id=r.get('%s_current_user' % group_name))
+    print "it's %s" % track
+
+    conn = stomp.Connection()
+    conn.start()
+    conn.connect()
+    conn.subscribe(destination='/user/%s' % user.id, ack='auto')
+    msg = json.dumps({'type': 'song', 'user': playing_user.username, 'artist': track.artist.name, 'title': track.title, 'url': track.get_location().url})
+    conn.send(msg, destination='/user/%s' % user.id)
+
+def send_next_song(group_name):
+    print "send next song"
+    r = redis.Redis(host='localhost', port=6379, db=0)
+    user_count = r.scard('%s_users' % group_name)
+    if user_count == 0:
+        print "no tracks to enqueue"
+        return
+
+    user_id = None
+    track_id = None
+
+    # loop until we find a track
+    while track_id == None:
+        user_id = r.spop('%s_users' % group_name)
+        print "Got user id %s, has %s queued" % (user_id, r.llen('%s_%s_queue' % (group_name, user_id)))
+        if user_id == None:
+            return # no more users, we're kaput
+
+        track_id = r.lpop('%s_%s_queue' % (group_name, user_id))
+        print "Got track id %s" % track_id
+        if track_id != None:
+            r.sadd('%s_users' % group_name, user_id)
+
+    r.set('%s_current' % group_name, track_id)
+    r.set('%s_current_user' % group_name, user_id)
+    r.expire('%s_current' % group_name, 30)
+
+    user = User.objects.get(id=user_id)
+    track = Track.objects.get(id=track_id)
+    print "Going with %s from %s" % (track, user)
+
+    conn = stomp.Connection()
+    conn.start()
+    conn.connect()
+    conn.subscribe(destination='/mix/%s' % group_name, ack='auto')
+    msg = json.dumps({'type': 'song', 'user': user.username, 'artist': track.artist.name, 'title': track.title, 'url': track.get_location().url})
+    conn.send(msg, destination='/mix/%s' % group_name)
+
+@receiver(track_uploaded, sender=None)
+def tu(sender, **kwargs):
+    group_name = 'test'
+    user = kwargs['user']
+    track = kwargs['track']
+
+    r = redis.Redis(host='localhost', port=6379, db=0)
+    r.rpush('%s_%s_queue' % (group_name, user.id), track.id)
+    debug_enqueue(group_name, user, track)
+    r.sadd('%s_users' % group_name, user.id)
+    if (not r.exists('%s_current' % group_name)):
+        send_next_song(group_name)
+
+def next_song(request, group_name):
+    r = redis.Redis(host='localhost', port=6379, db=0)
+    if r.ttl('%s_current' % group_name) > 0:
+        print "ttl = %s" % r.ttl('%s_current' % group_name)
+        raise Http404()
+
+    send_next_song(group_name)
+
+    return HttpResponse()
+
+def current_song(request, group_name):
+    r = redis.Redis(host='localhost', port=6379, db=0)
+    send_current_song(group_name, request.user)
+    return HttpResponse()
+
+def force_next(request, group_name):
+    pass
 
 def index(request, group_name):
     group = get_object_or_404(Group, name=group_name)
@@ -26,14 +124,3 @@ def add_message(request, group_name):
     msg = json.dumps({"type": "chat", "user": request.user.username,"message":request.REQUEST.get('msg', '(blank)'), "time":time.strftime("%H:%S-%d/%m/%Y")})
     conn.send(msg, destination='/mix/%s' % group_name)
     return HttpResponse('ok')
-
-def add_song(request, group_name):
-    conn = stomp.Connection()
-    conn.start()
-    conn.connect()
-    conn.subscribe(destination='/mix/%s' % group_name, ack='auto')
-    track = Track.objects.get(id=request.REQUEST['id'])
-    msg = json.dumps({'type': 'song', 'user': request.user.username, 'artist': track.artist.name, 'title': track.title, 'url': track.get_location().url})
-    conn.send(msg, destination='/mix/%s' % group_name)
-    #r.lpush('mix_%s' % group_name, json.dumps([track.id, str(track.__unicode__())]))
-    return HttpResponse('ok')
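
For reference, the Redis bookkeeping that send_next_song and tu introduce above can be exercised outside Django and STOMP. The following is a minimal, illustrative sketch only: it assumes a local Redis on the default port and substitutes plain string IDs for the User and Track objects (the 'demo' group and the IDs 1 and 42 are invented for the example).

    import redis

    r = redis.Redis(host='localhost', port=6379, db=0)

    def enqueue(group, user_id, track_id):
        # per-user queue of track ids, plus a set of users with pending tracks
        r.rpush('%s_%s_queue' % (group, user_id), track_id)
        r.sadd('%s_users' % group, user_id)

    def pick_next(group):
        # pop users at random until one of them has a queued track
        while True:
            user_id = r.spop('%s_users' % group)
            if user_id is None:
                return None                          # nothing queued in this group
            track_id = r.lpop('%s_%s_queue' % (group, user_id))
            if track_id is not None:
                r.sadd('%s_users' % group, user_id)  # user may have more queued
                r.set('%s_current' % group, track_id)
                r.set('%s_current_user' % group, user_id)
                r.expire('%s_current' % group, 30)   # "now playing" marker
                return user_id, track_id

    enqueue('demo', '1', '42')
    print pick_next('demo')                          # ('1', '42') on a fresh database

Because the '<group>_current' key simply expires (30 seconds in this change), next_song can refuse to advance to a new track early just by checking its remaining ttl().
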
diff --git a/muxlist/music/signals.py b/muxlist/music/signals.py
new file mode 100644
index 0000000..e9af94b
--- /dev/null
+++ b/muxlist/music/signals.py
@@ -0,0 +1,3 @@
+import django.dispatch
+
+track_uploaded = django.dispatch.Signal(providing_args=['track', 'group', 'user'])
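
mix/views.py above already registers one receiver for track_uploaded (the tu function); any other component can hook into uploads the same way. The listener below is a hypothetical example (the name log_upload and the message format are invented), showing the keyword arguments that the senders in muxlist/music/views.py supply.

    from django.dispatch import receiver

    from muxlist.music.signals import track_uploaded

    @receiver(track_uploaded, sender=None)
    def log_upload(sender, **kwargs):
        # senders call track_uploaded.send(sender=None, track=..., group=..., user=...)
        print "%s queued %s for group %s" % (kwargs['user'], kwargs['track'], kwargs['group'])
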
diff --git a/muxlist/music/views.py b/muxlist/music/views.py
index 46a4536..0fafb62 100644
--- a/muxlist/music/views.py
+++ b/muxlist/music/views.py
@@ -9,6 +9,8 @@
 from muxlist.music.decorators import move_rawdata_to_files
 from muxlist.music.models import TrackLocation
+from muxlist.music.signals import track_uploaded
+
 @login_required
 def index(request):
     profile = request.user.get_profile()
@@ -25,7 +27,8 @@ def begin_slice(request):
         try:
             tl = TrackLocation.objects.get(begin_hash=begin_hash, size=request.META['HTTP_X_FILE_SIZE'])
             if request.user in tl.track.uploaded_by.all():
-                return HttpResponse(tl.track.id)
+                track_uploaded.send(sender=None, track=tl.track, group=form.cleaned_data['group'], user=request.user)
+                return HttpResponse()
         except TrackLocation.DoesNotExist:
             return HttpResponse(status=404)
         request.session['begin_hash'] = begin_hash
@@ -46,7 +49,8 @@ def middle_slice(request):
         try:
             tl = TrackLocation.objects.get(begin_hash=begin_hash, middle_hash=middle_hash, size=request.META['HTTP_X_FILE_SIZE'])
             if request.user in tl.track.uploaded_by.all():
-                return HttpResponse(tl.track.id)
+                track_uploaded.send(sender=None, track=tl.track, group=form.cleaned_data['group'], user=request.user)
+                return HttpResponse()
         except TrackLocation.DoesNotExist:
             return HttpResponse(status=404)
         request.session['middle_hash'] = middle_hash
@@ -67,7 +71,8 @@ def end_slice(request):
         print "size = %s, begin_hash=%s, middle_hash = %s, end_hash = %s" % (request.META['HTTP_X_FILE_SIZE'], begin_hash, middle_hash, end_hash)
         try:
             tl = TrackLocation.objects.get(begin_hash=begin_hash, middle_hash=middle_hash, end_hash=end_hash, size=request.META['HTTP_X_FILE_SIZE'])
-            return HttpResponse(tl.track.id)
+            track_uploaded.send(sender=None, track=tl.track, group=form.cleaned_data['group'], user=request.user)
+            return HttpResponse()
         except TrackLocation.DoesNotExist:
             pass
         return HttpResponse(status=404)
     else:
@@ -81,7 +86,8 @@ def upload(request):
         form = UploadForm(data=request.POST, files=request.FILES)
         if form.is_valid():
             track = form.save(user=request.user)
-            return HttpResponse(track.id)
+            track_uploaded.send(sender=None, track=track, group=form.cleaned_data['group'], user=request.user)
+            return HttpResponse()
         else:
             return HttpResponse("invalid: %s" % (', '.join(form.errors)), status=500)
     else:
diff --git a/src/andymccurdy-redis-py-7112f5b/.gitignore b/src/andymccurdy-redis-py-7112f5b/.gitignore
new file mode 100644
index 0000000..0ed6db7
--- /dev/null
+++ b/src/andymccurdy-redis-py-7112f5b/.gitignore
@@ -0,0 +1,5 @@
+*.pyc
+redis.egg-info
+build/
+dist/
+dump.rdb
diff --git a/src/andymccurdy-redis-py-7112f5b/CHANGES b/src/andymccurdy-redis-py-7112f5b/CHANGES
new file mode 100644
index 0000000..4a7c8d1
--- /dev/null
+++ b/src/andymccurdy-redis-py-7112f5b/CHANGES
@@ -0,0 +1,19 @@
+* 2.2.2
+    * Fixed a bug in ZREVRANK where retriving the rank of a value not in
+      the zset would raise an error.
+    * Fixed a bug in Connection.send where the errno import was getting
+      overwritten by a local variable.
+    * Fixed a bug in SLAVEOF when promoting an existing slave to a master.
+    * Reverted change of download URL back to redis-VERSION.tar.gz. 2.2.1's
+      change of this actually broke Pypi for Pip installs. Sorry!
+* 2.2.1
+    * Changed archive name to redis-py-VERSION.tar.gz to not conflict
+      with the Redis server archive.
+* 2.2.0
+    * Implemented SLAVEOF
+    * Implemented CONFIG as config_get and config_set
+    * Implemented GETBIT/SETBIT
+    * Implemented BRPOPLPUSH
+    * Implemented STRLEN
+    * Implemented PERSIST
+    * Implemented SETRANGE
diff --git a/src/andymccurdy-redis-py-7112f5b/INSTALL b/src/andymccurdy-redis-py-7112f5b/INSTALL
new file mode 100644
index 0000000..951f7de
--- /dev/null
+++ b/src/andymccurdy-redis-py-7112f5b/INSTALL
@@ -0,0 +1,6 @@
+
+Please use
+    python setup.py install
+
+and report errors to Andy McCurdy (sedrik@gmail.com)
+
diff --git a/src/andymccurdy-redis-py-7112f5b/LICENSE b/src/andymccurdy-redis-py-7112f5b/LICENSE
new file mode 100644
index 0000000..073b05c
--- /dev/null
+++ b/src/andymccurdy-redis-py-7112f5b/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2010 Andy McCurdy
+
+    Permission is hereby granted, free of charge, to any person
+    obtaining a copy of this software and associated documentation
+    files (the "Software"), to deal in the Software without
+    restriction, including without limitation the rights to use,
+    copy, modify, merge, publish, distribute, sublicense, and/or sell
+    copies of the Software, and to permit persons to whom the
+    Software is furnished to do so, subject to the following
+    conditions:
+
+    The above copyright notice and this permission notice shall be
+    included in all copies or substantial portions of the Software.
+
+    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+    EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+    OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+    NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. diff --git a/src/andymccurdy-redis-py-7112f5b/MANIFEST.in b/src/andymccurdy-redis-py-7112f5b/MANIFEST.in new file mode 100644 index 0000000..1b2fcd9 --- /dev/null +++ b/src/andymccurdy-redis-py-7112f5b/MANIFEST.in @@ -0,0 +1,4 @@ +include CHANGES +include INSTALL +include LICENSE +include README.md diff --git a/src/andymccurdy-redis-py-7112f5b/README.md b/src/andymccurdy-redis-py-7112f5b/README.md new file mode 100644 index 0000000..aa0cef3 --- /dev/null +++ b/src/andymccurdy-redis-py-7112f5b/README.md @@ -0,0 +1,498 @@ +redis-py +======== + +This is the Python interface to the Redis key-value store. + + +Usage +----- + + >>> import redis + >>> r = redis.Redis(host='localhost', port=6379, db=0) + >>> r.set('foo', 'bar') # or r['foo'] = 'bar' + True + >>> r.get('foo') # or r['foo'] + 'bar' + +For a complete list of commands, check out the list of Redis commands here: +http://code.google.com/p/redis/wiki/CommandReference + +Installation +------------ + $ sudo pip install redis + +alternatively: + + $ sudo easy_install redis + +From sources: + + $ sudo python setup.py install + +Versioning scheme +----------------- + +redis-py is versioned after Redis. So, for example, redis-py 2.0.0 should +support all the commands available in Redis 2.0.0. + +API Reference +------------- + +### append(self, key, value) + Appends the string _value_ to the value at _key_. If _key_ + doesn't already exist, create it with a value of _value_. + Returns the new length of the value at _key_. + +### bgrewriteaof(self) + Tell the Redis server to rewrite the AOF file from data in memory. + +### bgsave(self) + Tell the Redis server to save its data to disk. Unlike save(), + this method is asynchronous and returns immediately. + +### blpop(self, keys, timeout=0) + LPOP a value off of the first non-empty list + named in the _keys_ list. + + If none of the lists in _keys_ has a value to LPOP, then block + for _timeout_ seconds, or until a value gets pushed on to one + of the lists. + + If timeout is 0, then block indefinitely. + +### brpop(self, keys, timeout=0) + RPOP a value off of the first non-empty list + named in the _keys_ list. + + If none of the lists in _keys_ has a value to LPOP, then block + for _timeout_ seconds, or until a value gets pushed on to one + of the lists. + + If timeout is 0, then block indefinitely. + +### dbsize(self) + Returns the number of keys in the current database + +### decr(self, name, amount=1) + Decrements the value of _key_ by _amount_. If no key exists, + the value will be initialized as 0 - _amount_ + +### delete(self, *names) + Delete one or more keys specified by _names_ + +### encode(self, value) + Encode _value_ using the instance's charset + +### execute_command(self, *args, **options) + Sends the command to the redis server and returns it's response + +### exists(self, name) + Returns a boolean indicating whether key _name_ exists + +### expire(self, name, time) + Set an expire flag on key _name_ for _time_ seconds + +### expireat(self, name, when) + Set an expire flag on key _name_. _when_ can be represented + as an integer indicating unix time or a Python datetime object. 
+ +### flush(self, all_dbs=False) + +### flushall(self) + Delete all keys in all databases on the current host + +### flushdb(self) + Delete all keys in the current database + +### get(self, name) + Return the value at key _name_, or None of the key doesn't exist + +### get_connection(self, host, port, db, password, socket_timeout) + Returns a connection object + +### getset(self, name, value) + Set the value at key _name_ to _value_ if key doesn't exist + Return the value at key _name_ atomically + +### hdel(self, name, key) + Delete _key_ from hash _name_ + +### hexists(self, name, key) + Returns a boolean indicating if _key_ exists within hash _name_ + +### hget(self, name, key) + Return the value of _key_ within the hash _name_ + +### hgetall(self, name) + Return a Python dict of the hash's name/value pairs + +### hincrby(self, name, key, amount=1) + Increment the value of _key_ in hash _name_ by _amount_ + +### hkeys(self, name) + Return the list of keys within hash _name_ + +### hlen(self, name) + Return the number of elements in hash _name_ + +### hmget(self, name, keys) + Returns a list of values ordered identically to _keys_ + +### hmset(self, name, mapping) + Sets each key in the _mapping_ dict to its corresponding value + in the hash _name_ + +### hset(self, name, key, value) + Set _key_ to _value_ within hash _name_ + Returns 1 if HSET created a new field, otherwise 0 + +### hsetnx(self, name, key, value) + Set _key_ to _value_ within hash _name_ if _key_ does not + exist. Returns 1 if HSETNX created a field, otherwise 0. + +### hvals(self, name) + Return the list of values within hash _name_ + +### incr(self, name, amount=1) + Increments the value of _key_ by _amount_. If no key exists, + the value will be initialized as _amount_ + +### info(self) + Returns a dictionary containing information about the Redis server + +### keys(self, pattern='*') + Returns a list of keys matching _pattern_ + +### lastsave(self) + Return a Python datetime object representing the last time the + Redis database was saved to disk + +### lindex(self, name, index) + Return the item from list _name_ at position _index_ + + Negative indexes are supported and will return an item at the + end of the list + +### listen(self) + Listen for messages on channels this client has been subscribed to + +### llen(self, name) + Return the length of the list _name_ + +###lock(self, name, timeout=None, sleep=0.10000000000000001) + Return a new Lock object using key _name_ that mimics + the behavior of threading.Lock. + + If specified, _timeout_ indicates a maximum life for the lock. + By default, it will remain locked until release() is called. + + _sleep_ indicates the amount of time to sleep per loop iteration + when the lock is in blocking mode and another client is currently + holding the lock. 
+ +### lpop(self, name) + Remove and return the first item of the list _name_ + +### lpush(self, name, value) + Push _value_ onto the head of the list _name_ + +### lrange(self, name, start, end) + Return a slice of the list _name_ between + position _start_ and _end_ + + _start_ and _end_ can be negative numbers just like + Python slicing notation + +### lrem(self, name, value, num=0) + Remove the first _num_ occurrences of _value_ from list _name_ + + If _num_ is 0, then all occurrences will be removed + +### lset(self, name, index, value) + Set _position_ of list _name_ to _value_ + +### ltrim(self, name, start, end) + Trim the list _name_, removing all values not within the slice + between _start_ and _end_ + + _start_ and _end_ can be negative numbers just like + Python slicing notation + +### mget(self, keys, *args) + Returns a list of values ordered identically to _keys_ + + * Passing *args to this method has been deprecated * + +### move(self, name, db) + Moves the key _name_ to a different Redis database _db_ + +### mset(self, mapping) + Sets each key in the _mapping_ dict to its corresponding value + +### msetnx(self, mapping) + Sets each key in the _mapping_ dict to its corresponding value if + none of the keys are already set + +### parse_response(self, command_name, catch_errors=False, **options) + Parses a response from the Redis server + +### ping(self) + Ping the Redis server + +### pipeline(self, transaction=True) + Return a new pipeline object that can queue multiple commands for + later execution. _transaction_ indicates whether all commands + should be executed atomically. Apart from multiple atomic operations, + pipelines are useful for batch loading of data as they reduce the + number of back and forth network operations between client and server. + +### pop(self, name, tail=False) + Pop and return the first or last element of list _name_ + + This method has been deprecated, use _Redis.lpop_ or _Redis.rpop_ instead. + +### psubscribe(self, patterns) + Subscribe to all channels matching any pattern in _patterns_ + +### publish(self, channel, message) + Publish _message_ on _channel_. + Returns the number of subscribers the message was delivered to. + +### punsubscribe(self, patterns=[]) + Unsubscribe from any channel matching any pattern in _patterns_. + If empty, unsubscribe from all channels. + +### push(self, name, value, head=False) + Push _value_ onto list _name_. + + This method has been deprecated, use __Redis.lpush__ or __Redis.rpush__ instead. + +### randomkey(self) + Returns the name of a random key + +### rename(self, src, dst, **kwargs) + Rename key _src_ to _dst_ + + * The following flags have been deprecated * + If _preserve_ is True, rename the key only if the destination name + doesn't already exist + +### renamenx(self, src, dst) + Rename key _src_ to _dst_ if _dst_ doesn't already exist + +### rpop(self, name) + Remove and return the last item of the list _name_ + +### rpoplpush(self, src, dst) + RPOP a value off of the _src_ list and atomically LPUSH it + on to the _dst_ list. Returns the value. 
+ +### rpush(self, name, value) + Push _value_ onto the tail of the list _name_ + +### sadd(self, name, value) + Add _value_ to set _name_ + +### save(self) + Tell the Redis server to save its data to disk, + blocking until the save is complete + +### scard(self, name) + Return the number of elements in set _name_ + +### sdiff(self, keys, *args) + Return the difference of sets specified by _keys_ + +### sdiffstore(self, dest, keys, *args) + Store the difference of sets specified by _keys_ into a new + set named _dest_. Returns the number of keys in the new set. + +### select(self, db, host=None, port=None, password=None, socket_timeout=None) + Switch to a different Redis connection. + + If the host and port aren't provided and there's an existing + connection, use the existing connection's host and port instead. + + Note this method actually replaces the underlying connection object + prior to issuing the SELECT command. This makes sure we protect + the thread-safe connections + +### set(self, name, value, **kwargs) + Set the value at key _name_ to _value_ + + * The following flags have been deprecated * + If _preserve_ is True, set the value only if key doesn't already + exist + If _getset_ is True, set the value only if key doesn't already exist + and return the resulting value of key + +### setex(self, name, value, time) + Set the value of key _name_ to _value_ + that expires in _time_ seconds + +### setnx(self, name, value) + Set the value of key _name_ to _value_ if key doesn't exist + +### sinter(self, keys, *args) + Return the intersection of sets specified by _keys_ + +### sinterstore(self, dest, keys, *args) + Store the intersection of sets specified by _keys_ into a new + set named _dest_. Returns the number of keys in the new set. + +### sismember(self, name, value) + Return a boolean indicating if _value_ is a member of set _name_ + +### smembers(self, name) + Return all members of the set _name_ + +### smove(self, src, dst, value) + Move _value_ from set _src_ to set _dst_ atomically + +### sort(self, name, start=None, num=None, by=None, get=None, desc=False, alpha=False, store=None) + Sort and return the list, set or sorted set at _name_. + + _start_ and _num_ allow for paging through the sorted data + + _by_ allows using an external key to weight and sort the items. + Use an "*" to indicate where in the key the item value is located + + _get_ allows for returning items from external keys rather than the + sorted data itself. Use an "*" to indicate where int he key + the item value is located + + _desc_ allows for reversing the sort + + _alpha_ allows for sorting lexicographically rather than numerically + + _store_ allows for storing the result of the sort into + the key _store_ + +### spop(self, name) + Remove and return a random member of set _name_ + +### srandmember(self, name) + Return a random member of set _name_ + +### srem(self, name, value) + Remove _value_ from set _name_ + +### subscribe(self, channels) + Subscribe to _channels_, waiting for messages to be published + +### substr(self, name, start, end=-1) + Return a substring of the string at key _name_. _start_ and _end_ + are 0-based integers specifying the portion of the string to return. + +### sunion(self, keys, *args) + Return the union of sets specifiued by _keys_ + +### sunionstore(self, dest, keys, *args) + Store the union of sets specified by _keys_ into a new + set named _dest_. Returns the number of keys in the new set. 
+ +### ttl(self, name) + Returns the number of seconds until the key _name_ will expire + +### type(self, name) + Returns the type of key _name_ + +### unsubscribe(self, channels=[]) + Unsubscribe from _channels_. If empty, unsubscribe + from all channels + +### watch(self, name): + Watches the value at key _name_. + +### zadd(self, name, value, score) + Add member _value_ with score _score_ to sorted set _name_ + +### zcard(self, name) + Return the number of elements in the sorted set _name_ + +### zincr(self, key, member, value=1) + This has been deprecated, use zincrby instead + +### zincrby(self, name, value, amount=1) + Increment the score of _value_ in sorted set _name_ by _amount_ + +### zinter(self, dest, keys, aggregate=None) + +###zinterstore(self, dest, keys, aggregate=None) + Intersect multiple sorted sets specified by _keys_ into + a new sorted set, _dest_. Scores in the destination will be + aggregated based on the _aggregate_, or SUM if none is provided. + +### zrange(self, name, start, end, desc=False, withscores=False) + Return a range of values from sorted set _name_ between + _start_ and _end_ sorted in ascending order. + + _start_ and _end_ can be negative, indicating the end of the range. + + _desc_ indicates to sort in descending order. + + _withscores_ indicates to return the scores along with the values. + The return type is a list of (value, score) pairs + +### zrangebyscore(self, name, min, max, start=None, num=None, withscores=False) + Return a range of values from the sorted set _name_ with scores + between _min_ and _max_. + + If _start_ and _num_ are specified, then return a slice of the range. + + _withscores_ indicates to return the scores along with the values. + The return type is a list of (value, score) pairs + +### zrank(self, name, value) + Returns a 0-based value indicating the rank of _value_ in sorted set + _name_ + +### zrem(self, name, value) + Remove member _value_ from sorted set _name_ + +### zremrangebyrank(self, name, min, max) + Remove all elements in the sorted set _name_ with ranks between + _min_ and _max_. Values are 0-based, ordered from smallest score + to largest. Values can be negative indicating the highest scores. + Returns the number of elements removed + +### zremrangebyscore(self, name, min, max) + Remove all elements in the sorted set _name_ with scores + between _min_ and _max_. Returns the number of elements removed. + +### zrevrange(self, name, start, num, withscores=False) + Return a range of values from sorted set _name_ between + _start_ and _num_ sorted in descending order. + + _start_ and _num_ can be negative, indicating the end of the range. + + _withscores_ indicates to return the scores along with the values + as a dictionary of value => score + +### zrevrank(self, name, value) + Returns a 0-based value indicating the descending rank of + _value_ in sorted set _name_ + +### zscore(self, name, value) + Return the score of element _value_ in sorted set _name_ + +### zunion(self, dest, keys, aggregate=None) + +### zunionstore(self, dest, keys, aggregate=None) + Union multiple sorted sets specified by _keys_ into + a new sorted set, _dest_. Scores in the destination will be + aggregated based on the _aggregate_, or SUM if none is provided. + +Author +------ + +redis-py is developed and maintained by Andy McCurdy (sedrik@gmail.com). 
+It can be found here: http://github.com/andymccurdy/redis-py + +Special thanks to: + +* Ludovico Magnocavallo, author of the original Python Redis client, from + which some of the socket code is still used. +* Alexander Solovyov for ideas on the generic response callback system. +* Paul Hubbard for initial packaging support. + diff --git a/src/andymccurdy-redis-py-7112f5b/redis/__init__.py b/src/andymccurdy-redis-py-7112f5b/redis/__init__.py new file mode 100644 index 0000000..437f20c --- /dev/null +++ b/src/andymccurdy-redis-py-7112f5b/redis/__init__.py @@ -0,0 +1,12 @@ +# legacy imports +from redis.client import Redis, ConnectionPool +from redis.exceptions import RedisError, ConnectionError, AuthenticationError +from redis.exceptions import ResponseError, InvalidResponse, InvalidData + +__version__ = '2.2.2' + +__all__ = [ + 'Redis', 'ConnectionPool', + 'RedisError', 'ConnectionError', 'ResponseError', 'AuthenticationError' + 'InvalidResponse', 'InvalidData', + ] diff --git a/src/andymccurdy-redis-py-7112f5b/redis/client.py b/src/andymccurdy-redis-py-7112f5b/redis/client.py new file mode 100644 index 0000000..9ba3547 --- /dev/null +++ b/src/andymccurdy-redis-py-7112f5b/redis/client.py @@ -0,0 +1,1546 @@ +import datetime +import errno +import socket +import threading +import time +import warnings +from itertools import chain, imap +from redis.exceptions import ConnectionError, ResponseError, InvalidResponse, WatchError +from redis.exceptions import RedisError, AuthenticationError + + +class ConnectionPool(threading.local): + "Manages a list of connections on the local thread" + def __init__(self): + self.connections = {} + + def make_connection_key(self, host, port, db): + "Create a unique key for the specified host, port and db" + return '%s:%s:%s' % (host, port, db) + + def get_connection(self, host, port, db, password, socket_timeout): + "Return a specific connection for the specified host, port and db" + key = self.make_connection_key(host, port, db) + if key not in self.connections: + self.connections[key] = Connection( + host, port, db, password, socket_timeout) + return self.connections[key] + + def get_all_connections(self): + "Return a list of all connection objects the manager knows about" + return self.connections.values() + + +class Connection(object): + "Manages TCP communication to and from a Redis server" + def __init__(self, host='localhost', port=6379, db=0, password=None, + socket_timeout=None): + self.host = host + self.port = port + self.db = db + self.password = password + self.socket_timeout = socket_timeout + self._sock = None + self._fp = None + + def connect(self, redis_instance): + "Connects to the Redis server if not already connected" + if self._sock: + return + try: + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.settimeout(self.socket_timeout) + sock.connect((self.host, self.port)) + except socket.error, e: + # args for socket.error can either be (errno, "message") + # or just "message" + if len(e.args) == 1: + error_message = "Error connecting to %s:%s. %s." % \ + (self.host, self.port, e.args[0]) + else: + error_message = "Error %s connecting %s:%s. %s." 
% \ + (e.args[0], self.host, self.port, e.args[1]) + raise ConnectionError(error_message) + sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1) + self._sock = sock + self._fp = sock.makefile('r') + redis_instance._setup_connection() + + def disconnect(self): + "Disconnects from the Redis server" + if self._sock is None: + return + try: + self._sock.close() + except socket.error: + pass + self._sock = None + self._fp = None + + def send(self, command, redis_instance): + "Send ``command`` to the Redis server. Return the result." + self.connect(redis_instance) + try: + self._sock.sendall(command) + except socket.error, e: + if e.args[0] == errno.EPIPE: + self.disconnect() + if isinstance(e.args, basestring): + _errno, errmsg = 'UNKNOWN', e.args + else: + _errno, errmsg = e.args + raise ConnectionError("Error %s while writing to socket. %s." % \ + (_errno, errmsg)) + + def read(self, length=None): + """ + Read a line from the socket is length is None, + otherwise read ``length`` bytes + """ + try: + if length is not None: + return self._fp.read(length) + return self._fp.readline() + except socket.error, e: + self.disconnect() + if e.args and e.args[0] == errno.EAGAIN: + raise ConnectionError("Error while reading from socket: %s" % \ + e.args[1]) + return '' + +def list_or_args(command, keys, args): + # returns a single list combining keys and args + # if keys is not a list or args has items, issue a + # deprecation warning + oldapi = bool(args) + try: + i = iter(keys) + # a string can be iterated, but indicates + # keys wasn't passed as a list + if isinstance(keys, basestring): + oldapi = True + except TypeError: + oldapi = True + keys = [keys] + if oldapi: + warnings.warn(DeprecationWarning( + "Passing *args to Redis.%s has been deprecated. " + "Pass an iterable to ``keys`` instead" % command + )) + keys.extend(args) + return keys + +def timestamp_to_datetime(response): + "Converts a unix timestamp to a Python datetime object" + if not response: + return None + try: + response = int(response) + except ValueError: + return None + return datetime.datetime.fromtimestamp(response) + +def string_keys_to_dict(key_string, callback): + return dict([(key, callback) for key in key_string.split()]) + +def dict_merge(*dicts): + merged = {} + [merged.update(d) for d in dicts] + return merged + +def parse_info(response): + "Parse the result of Redis's INFO command into a Python dict" + info = {} + def get_value(value): + if ',' not in value: + return value + sub_dict = {} + for item in value.split(','): + k, v = item.split('=') + try: + sub_dict[k] = int(v) + except ValueError: + sub_dict[k] = v + return sub_dict + for line in response.splitlines(): + key, value = line.split(':') + try: + info[key] = int(value) + except ValueError: + info[key] = get_value(value) + return info + +def pairs_to_dict(response): + "Create a dict given a list of key/value pairs" + return dict(zip(response[::2], response[1::2])) + +def zset_score_pairs(response, **options): + """ + If ``withscores`` is specified in the options, return the response as + a list of (value, score) pairs + """ + if not response or not options['withscores']: + return response + return zip(response[::2], map(float, response[1::2])) + +def int_or_none(response): + if response is None: + return None + return int(response) + +def float_or_none(response): + if response is None: + return None + return float(response) + +def parse_config(response, **options): + # this is stupid, but don't have a better option right now + if options['parse'] == 'GET': + 
return response and pairs_to_dict(response) or {} + return response == 'OK' + +class Redis(threading.local): + """ + Implementation of the Redis protocol. + + This abstract class provides a Python interface to all Redis commands + and an implementation of the Redis protocol. + + Connection and Pipeline derive from this, implementing how + the commands are sent and received to the Redis server + """ + RESPONSE_CALLBACKS = dict_merge( + string_keys_to_dict( + 'AUTH DEL EXISTS EXPIRE EXPIREAT HDEL HEXISTS HMSET MOVE MSETNX ' + 'PERSIST RENAMENX SADD SISMEMBER SMOVE SETEX SETNX SREM ZADD ZREM', + bool + ), + string_keys_to_dict( + 'DECRBY GETBIT HLEN INCRBY LINSERT LLEN LPUSHX RPUSHX SCARD ' + 'SDIFFSTORE SETBIT SETRANGE SINTERSTORE STRLEN SUNIONSTORE ZCARD ' + 'ZREMRANGEBYRANK ZREMRANGEBYSCORE', + int + ), + string_keys_to_dict( + # these return OK, or int if redis-server is >=1.3.4 + 'LPUSH RPUSH', + lambda r: isinstance(r, int) and r or r == 'OK' + ), + string_keys_to_dict('ZSCORE ZINCRBY', float_or_none), + string_keys_to_dict( + 'FLUSHALL FLUSHDB LSET LTRIM MSET RENAME ' + 'SAVE SELECT SET SHUTDOWN SLAVEOF WATCH UNWATCH', + lambda r: r == 'OK' + ), + string_keys_to_dict('BLPOP BRPOP', lambda r: r and tuple(r) or None), + string_keys_to_dict('SDIFF SINTER SMEMBERS SUNION', + lambda r: r and set(r) or set() + ), + string_keys_to_dict('ZRANGE ZRANGEBYSCORE ZREVRANGE', zset_score_pairs), + string_keys_to_dict('ZRANK ZREVRANK', int_or_none), + { + 'BGREWRITEAOF': lambda r: \ + r == 'Background rewriting of AOF file started', + 'BGSAVE': lambda r: r == 'Background saving started', + 'BRPOPLPUSH': lambda r: r and r or None, + 'CONFIG': parse_config, + 'HGETALL': lambda r: r and pairs_to_dict(r) or {}, + 'INFO': parse_info, + 'LASTSAVE': timestamp_to_datetime, + 'PING': lambda r: r == 'PONG', + 'RANDOMKEY': lambda r: r and r or None, + 'TTL': lambda r: r != -1 and r or None, + } + ) + + # commands that should NOT pull data off the network buffer when executed + SUBSCRIPTION_COMMANDS = set([ + 'SUBSCRIBE', 'UNSUBSCRIBE', 'PSUBSCRIBE', 'PUNSUBSCRIBE' + ]) + + def __init__(self, host='localhost', port=6379, + db=0, password=None, socket_timeout=None, + connection_pool=None, + charset='utf-8', errors='strict'): + self.encoding = charset + self.errors = errors + self.connection = None + self.subscribed = False + self.connection_pool = connection_pool and connection_pool or ConnectionPool() + self.select(db, host, port, password, socket_timeout) + + #### Legacty accessors of connection information #### + def _get_host(self): + return self.connection.host + host = property(_get_host) + + def _get_port(self): + return self.connection.port + port = property(_get_port) + + def _get_db(self): + return self.connection.db + db = property(_get_db) + + def pipeline(self, transaction=True): + """ + Return a new pipeline object that can queue multiple commands for + later execution. ``transaction`` indicates whether all commands + should be executed atomically. Apart from multiple atomic operations, + pipelines are useful for batch loading of data as they reduce the + number of back and forth network operations between client and server. + """ + return Pipeline( + self.connection, + transaction, + self.encoding, + self.errors + ) + + def lock(self, name, timeout=None, sleep=0.1): + """ + Return a new Lock object using key ``name`` that mimics + the behavior of threading.Lock. + + If specified, ``timeout`` indicates a maximum life for the lock. + By default, it will remain locked until release() is called. 
+ + ``sleep`` indicates the amount of time to sleep per loop iteration + when the lock is in blocking mode and another client is currently + holding the lock. + """ + return Lock(self, name, timeout=timeout, sleep=sleep) + + #### COMMAND EXECUTION AND PROTOCOL PARSING #### + def _execute_command(self, command_name, command, **options): + subscription_command = command_name in self.SUBSCRIPTION_COMMANDS + if self.subscribed and not subscription_command: + raise RedisError("Cannot issue commands other than SUBSCRIBE and " + "UNSUBSCRIBE while channels are open") + try: + self.connection.send(command, self) + if subscription_command: + return None + return self.parse_response(command_name, **options) + except ConnectionError: + self.connection.disconnect() + self.connection.send(command, self) + if subscription_command: + return None + return self.parse_response(command_name, **options) + + def execute_command(self, *args, **options): + "Sends the command to the redis server and returns it's response" + cmds = ['$%s\r\n%s\r\n' % (len(enc_value), enc_value) + for enc_value in imap(self.encode, args)] + return self._execute_command( + args[0], + '*%s\r\n%s' % (len(cmds), ''.join(cmds)), + **options + ) + + def _parse_response(self, command_name, catch_errors): + conn = self.connection + response = conn.read()[:-2] # strip last two characters (\r\n) + if not response: + self.connection.disconnect() + raise ConnectionError("Socket closed on remote end") + + # server returned a null value + if response in ('$-1', '*-1'): + return None + byte, response = response[0], response[1:] + + # server returned an error + if byte == '-': + if response.startswith('ERR '): + response = response[4:] + raise ResponseError(response) + # single value + elif byte == '+': + return response + # int value + elif byte == ':': + return int(response) + # bulk response + elif byte == '$': + length = int(response) + if length == -1: + return None + response = length and conn.read(length) or '' + conn.read(2) # read the \r\n delimiter + return response + # multi-bulk response + elif byte == '*': + length = int(response) + if length == -1: + return None + if not catch_errors: + return [self._parse_response(command_name, catch_errors) + for i in range(length)] + else: + # for pipelines, we need to read everything, + # including response errors. 
otherwise we'd + # completely mess up the receive buffer + data = [] + for i in range(length): + try: + data.append( + self._parse_response(command_name, catch_errors) + ) + except Exception, e: + data.append(e) + return data + + raise InvalidResponse("Unknown response type for: %s" % command_name) + + def parse_response(self, command_name, catch_errors=False, **options): + "Parses a response from the Redis server" + response = self._parse_response(command_name, catch_errors) + if command_name in self.RESPONSE_CALLBACKS: + return self.RESPONSE_CALLBACKS[command_name](response, **options) + return response + + def encode(self, value): + "Encode ``value`` using the instance's charset" + if isinstance(value, str): + return value + if isinstance(value, unicode): + return value.encode(self.encoding, self.errors) + # not a string or unicode, attempt to convert to a string + return str(value) + + #### CONNECTION HANDLING #### + def get_connection(self, host, port, db, password, socket_timeout): + "Returns a connection object" + conn = self.connection_pool.get_connection( + host, port, db, password, socket_timeout) + # if for whatever reason the connection gets a bad password, make + # sure a subsequent attempt with the right password makes its way + # to the connection + conn.password = password + return conn + + def _setup_connection(self): + """ + After successfully opening a socket to the Redis server, the + connection object calls this method to authenticate and select + the appropriate database. + """ + if self.connection.password: + if not self.execute_command('AUTH', self.connection.password): + raise AuthenticationError("Invalid Password") + self.execute_command('SELECT', self.connection.db) + + def select(self, db, host=None, port=None, password=None, + socket_timeout=None): + """ + Switch to a different Redis connection. + + If the host and port aren't provided and there's an existing + connection, use the existing connection's host and port instead. + + Note this method actually replaces the underlying connection object + prior to issuing the SELECT command. This makes sure we protect + the thread-safe connections + """ + if host is None: + if self.connection is None: + raise RedisError("A valid hostname or IP address " + "must be specified") + host = self.connection.host + if port is None: + if self.connection is None: + raise RedisError("A valid port must be specified") + port = self.connection.port + + self.connection = self.get_connection( + host, port, db, password, socket_timeout) + + def shutdown(self): + "Shutdown the server" + if self.subscribed: + raise RedisError("Can't call 'shutdown' from a pipeline'") + try: + self.execute_command('SHUTDOWN') + except ConnectionError: + # a ConnectionError here is expected + return + raise RedisError("SHUTDOWN seems to have failed.") + + + #### SERVER INFORMATION #### + def bgrewriteaof(self): + "Tell the Redis server to rewrite the AOF file from data in memory." + return self.execute_command('BGREWRITEAOF') + + def bgsave(self): + """ + Tell the Redis server to save its data to disk. Unlike save(), + this method is asynchronous and returns immediately. 
+ """ + return self.execute_command('BGSAVE') + + def config_get(self, pattern="*"): + "Return a dictionary of configuration based on the ``pattern``" + return self.execute_command('CONFIG', 'GET', pattern, parse='GET') + + def config_set(self, name, value): + "Set config item ``name`` with ``value``" + return self.execute_command('CONFIG', 'SET', name, value, parse='SET') + + def dbsize(self): + "Returns the number of keys in the current database" + return self.execute_command('DBSIZE') + + def delete(self, *names): + "Delete one or more keys specified by ``names``" + return self.execute_command('DEL', *names) + __delitem__ = delete + + def flush(self, all_dbs=False): + warnings.warn(DeprecationWarning( + "'flush' has been deprecated. " + "Use Redis.flushdb() or Redis.flushall() instead")) + if all_dbs: + return self.flushall() + return self.flushdb() + + def flushall(self): + "Delete all keys in all databases on the current host" + return self.execute_command('FLUSHALL') + + def flushdb(self): + "Delete all keys in the current database" + return self.execute_command('FLUSHDB') + + def info(self): + "Returns a dictionary containing information about the Redis server" + return self.execute_command('INFO') + + def lastsave(self): + """ + Return a Python datetime object representing the last time the + Redis database was saved to disk + """ + return self.execute_command('LASTSAVE') + + def ping(self): + "Ping the Redis server" + return self.execute_command('PING') + + def save(self): + """ + Tell the Redis server to save its data to disk, + blocking until the save is complete + """ + return self.execute_command('SAVE') + + def slaveof(self, host=None, port=None): + """ + Set the server to be a replicated slave of the instance identified + by the ``host`` and ``port``. If called without arguements, the + instance is promoted to a master instead. + """ + if host is None and port is None: + return self.execute_command("SLAVEOF", "NO", "ONE") + return self.execute_command("SLAVEOF", host, port) + + #### BASIC KEY COMMANDS #### + def append(self, key, value): + """ + Appends the string ``value`` to the value at ``key``. If ``key`` + doesn't already exist, create it with a value of ``value``. + Returns the new length of the value at ``key``. + """ + return self.execute_command('APPEND', key, value) + + def decr(self, name, amount=1): + """ + Decrements the value of ``key`` by ``amount``. If no key exists, + the value will be initialized as 0 - ``amount`` + """ + return self.execute_command('DECRBY', name, amount) + + def exists(self, name): + "Returns a boolean indicating whether key ``name`` exists" + return self.execute_command('EXISTS', name) + __contains__ = exists + + def expire(self, name, time): + "Set an expire flag on key ``name`` for ``time`` seconds" + return self.execute_command('EXPIRE', name, time) + + def expireat(self, name, when): + """ + Set an expire flag on key ``name``. ``when`` can be represented + as an integer indicating unix time or a Python datetime object. 
+ """ + if isinstance(when, datetime.datetime): + when = int(time.mktime(when.timetuple())) + return self.execute_command('EXPIREAT', name, when) + + def get(self, name): + """ + Return the value at key ``name``, or None of the key doesn't exist + """ + return self.execute_command('GET', name) + __getitem__ = get + + def getbit(self, name, offset): + "Returns a boolean indicating the value of ``offset`` in ``name``" + return self.execute_command('GETBIT', name, offset) + + def getset(self, name, value): + """ + Set the value at key ``name`` to ``value`` if key doesn't exist + Return the value at key ``name`` atomically + """ + return self.execute_command('GETSET', name, value) + + def incr(self, name, amount=1): + """ + Increments the value of ``key`` by ``amount``. If no key exists, + the value will be initialized as ``amount`` + """ + return self.execute_command('INCRBY', name, amount) + + def keys(self, pattern='*'): + "Returns a list of keys matching ``pattern``" + return self.execute_command('KEYS', pattern) + + def mget(self, keys, *args): + """ + Returns a list of values ordered identically to ``keys`` + + * Passing *args to this method has been deprecated * + """ + keys = list_or_args('mget', keys, args) + return self.execute_command('MGET', *keys) + + def mset(self, mapping): + "Sets each key in the ``mapping`` dict to its corresponding value" + items = [] + for pair in mapping.iteritems(): + items.extend(pair) + return self.execute_command('MSET', *items) + + def msetnx(self, mapping): + """ + Sets each key in the ``mapping`` dict to its corresponding value if + none of the keys are already set + """ + items = [] + for pair in mapping.iteritems(): + items.extend(pair) + return self.execute_command('MSETNX', *items) + + def move(self, name, db): + "Moves the key ``name`` to a different Redis database ``db``" + return self.execute_command('MOVE', name, db) + + def persist(self, name): + "Removes an expiration on ``name``" + return self.execute_command('PERSIST', name) + + def randomkey(self): + "Returns the name of a random key" + return self.execute_command('RANDOMKEY') + + def rename(self, src, dst, **kwargs): + """ + Rename key ``src`` to ``dst`` + + * The following flags have been deprecated * + If ``preserve`` is True, rename the key only if the destination name + doesn't already exist + """ + if kwargs: + if 'preserve' in kwargs: + warnings.warn(DeprecationWarning( + "preserve option to 'rename' is deprecated, " + "use Redis.renamenx instead")) + if kwargs['preserve']: + return self.renamenx(src, dst) + return self.execute_command('RENAME', src, dst) + + def renamenx(self, src, dst): + "Rename key ``src`` to ``dst`` if ``dst`` doesn't already exist" + return self.execute_command('RENAMENX', src, dst) + + + def set(self, name, value, **kwargs): + """ + Set the value at key ``name`` to ``value`` + + * The following flags have been deprecated * + If ``preserve`` is True, set the value only if key doesn't already + exist + If ``getset`` is True, set the value only if key doesn't already exist + and return the resulting value of key + """ + if kwargs: + if 'getset' in kwargs: + warnings.warn(DeprecationWarning( + "getset option to 'set' is deprecated, " + "use Redis.getset() instead")) + if kwargs['getset']: + return self.getset(name, value) + if 'preserve' in kwargs: + warnings.warn(DeprecationWarning( + "preserve option to 'set' is deprecated, " + "use Redis.setnx() instead")) + if kwargs['preserve']: + return self.setnx(name, value) + return self.execute_command('SET', name, 
value) + __setitem__ = set + + def setbit(self, name, offset, value): + """ + Flag the ``offset`` in ``name`` as ``value``. Returns a boolean + indicating the previous value of ``offset``. + """ + value = value and 1 or 0 + return self.execute_command('SETBIT', name, offset, value) + + def setex(self, name, value, time): + """ + Set the value of key ``name`` to ``value`` + that expires in ``time`` seconds + """ + return self.execute_command('SETEX', name, time, value) + + def setnx(self, name, value): + "Set the value of key ``name`` to ``value`` if key doesn't exist" + return self.execute_command('SETNX', name, value) + + def setrange(self, name, offset, value): + """ + Overwrite bytes in the value of ``name`` starting at ``offset`` with + ``value``. If ``offset`` plus the length of ``value`` exceeds the + length of the original value, the new value will be larger than before. + If ``offset`` exceeds the length of the original value, null bytes + will be used to pad between the end of the previous value and the start + of what's being injected. + + Returns the length of the new string. + """ + return self.execute_command('SETRANGE', name, offset, value) + + def strlen(self, name): + "Return the number of bytes stored in the value of ``name``" + return self.execute_command('STRLEN', name) + + def substr(self, name, start, end=-1): + """ + Return a substring of the string at key ``name``. ``start`` and ``end`` + are 0-based integers specifying the portion of the string to return. + """ + return self.execute_command('SUBSTR', name, start, end) + + def ttl(self, name): + "Returns the number of seconds until the key ``name`` will expire" + return self.execute_command('TTL', name) + + def type(self, name): + "Returns the type of key ``name``" + return self.execute_command('TYPE', name) + + def watch(self, name): + """ + Watches the value at key ``name``, or None of the key doesn't exist + """ + if self.subscribed: + raise RedisError("Can't call 'watch' from a pipeline'") + + return self.execute_command('WATCH', name) + + def unwatch(self): + """ + Unwatches the value at key ``name``, or None of the key doesn't exist + """ + if self.subscribed: + raise RedisError("Can't call 'unwatch' from a pipeline'") + + return self.execute_command('UNWATCH') + + #### LIST COMMANDS #### + def blpop(self, keys, timeout=0): + """ + LPOP a value off of the first non-empty list + named in the ``keys`` list. + + If none of the lists in ``keys`` has a value to LPOP, then block + for ``timeout`` seconds, or until a value gets pushed on to one + of the lists. + + If timeout is 0, then block indefinitely. + """ + if timeout is None: + timeout = 0 + if isinstance(keys, basestring): + keys = [keys] + else: + keys = list(keys) + keys.append(timeout) + return self.execute_command('BLPOP', *keys) + + def brpop(self, keys, timeout=0): + """ + RPOP a value off of the first non-empty list + named in the ``keys`` list. + + If none of the lists in ``keys`` has a value to LPOP, then block + for ``timeout`` seconds, or until a value gets pushed on to one + of the lists. + + If timeout is 0, then block indefinitely. + """ + if timeout is None: + timeout = 0 + if isinstance(keys, basestring): + keys = [keys] + else: + keys = list(keys) + keys.append(timeout) + return self.execute_command('BRPOP', *keys) + + def brpoplpush(self, src, dst, timeout=0): + """ + Pop a value off the tail of ``src``, push it on the head of ``dst`` + and then return it. 
+ + This command blocks until a value is in ``src`` or until ``timeout`` + seconds elapse, whichever is first. A ``timeout`` value of 0 blocks + forever. + """ + if timeout is None: + timeout = 0 + return self.execute_command('BRPOPLPUSH', src, dst, timeout) + + def lindex(self, name, index): + """ + Return the item from list ``name`` at position ``index`` + + Negative indexes are supported and will return an item at the + end of the list + """ + return self.execute_command('LINDEX', name, index) + + def linsert(self, name, where, refvalue, value): + """ + Insert ``value`` in list ``name`` either immediately before or after + [``where``] ``refvalue`` + + Returns the new length of the list on success or -1 if ``refvalue`` + is not in the list. + """ + return self.execute_command('LINSERT', name, where, refvalue, value) + + def llen(self, name): + "Return the length of the list ``name``" + return self.execute_command('LLEN', name) + + def lpop(self, name): + "Remove and return the first item of the list ``name``" + return self.execute_command('LPOP', name) + + def lpush(self, name, value): + "Push ``value`` onto the head of the list ``name``" + return self.execute_command('LPUSH', name, value) + + def lpushx(self, name, value): + "Push ``value`` onto the head of the list ``name`` if ``name`` exists" + return self.execute_command('LPUSHX', name, value) + + def lrange(self, name, start, end): + """ + Return a slice of the list ``name`` between + position ``start`` and ``end`` + + ``start`` and ``end`` can be negative numbers just like + Python slicing notation + """ + return self.execute_command('LRANGE', name, start, end) + + def lrem(self, name, value, num=0): + """ + Remove the first ``num`` occurrences of ``value`` from list ``name`` + + If ``num`` is 0, then all occurrences will be removed + """ + return self.execute_command('LREM', name, num, value) + + def lset(self, name, index, value): + "Set ``position`` of list ``name`` to ``value``" + return self.execute_command('LSET', name, index, value) + + def ltrim(self, name, start, end): + """ + Trim the list ``name``, removing all values not within the slice + between ``start`` and ``end`` + + ``start`` and ``end`` can be negative numbers just like + Python slicing notation + """ + return self.execute_command('LTRIM', name, start, end) + + def pop(self, name, tail=False): + """ + Pop and return the first or last element of list ``name`` + + * This method has been deprecated, + use Redis.lpop or Redis.rpop instead * + """ + warnings.warn(DeprecationWarning( + "Redis.pop has been deprecated, " + "use Redis.lpop or Redis.rpop instead")) + if tail: + return self.rpop(name) + return self.lpop(name) + + def push(self, name, value, head=False): + """ + Push ``value`` onto list ``name``. + + * This method has been deprecated, + use Redis.lpush or Redis.rpush instead * + """ + warnings.warn(DeprecationWarning( + "Redis.push has been deprecated, " + "use Redis.lpush or Redis.rpush instead")) + if head: + return self.lpush(name, value) + return self.rpush(name, value) + + def rpop(self, name): + "Remove and return the last item of the list ``name``" + return self.execute_command('RPOP', name) + + def rpoplpush(self, src, dst): + """ + RPOP a value off of the ``src`` list and atomically LPUSH it + on to the ``dst`` list. Returns the value. 
+ """ + return self.execute_command('RPOPLPUSH', src, dst) + + def rpush(self, name, value): + "Push ``value`` onto the tail of the list ``name``" + return self.execute_command('RPUSH', name, value) + + def rpushx(self, name, value): + "Push ``value`` onto the tail of the list ``name`` if ``name`` exists" + return self.execute_command('RPUSHX', name, value) + + def sort(self, name, start=None, num=None, by=None, get=None, + desc=False, alpha=False, store=None): + """ + Sort and return the list, set or sorted set at ``name``. + + ``start`` and ``num`` allow for paging through the sorted data + + ``by`` allows using an external key to weight and sort the items. + Use an "*" to indicate where in the key the item value is located + + ``get`` allows for returning items from external keys rather than the + sorted data itself. Use an "*" to indicate where int he key + the item value is located + + ``desc`` allows for reversing the sort + + ``alpha`` allows for sorting lexicographically rather than numerically + + ``store`` allows for storing the result of the sort into + the key ``store`` + """ + if (start is not None and num is None) or \ + (num is not None and start is None): + raise RedisError("``start`` and ``num`` must both be specified") + + pieces = [name] + if by is not None: + pieces.append('BY') + pieces.append(by) + if start is not None and num is not None: + pieces.append('LIMIT') + pieces.append(start) + pieces.append(num) + if get is not None: + # If get is a string assume we want to get a single value. + # Otherwise assume it's an interable and we want to get multiple + # values. We can't just iterate blindly because strings are + # iterable. + if isinstance(get, basestring): + pieces.append('GET') + pieces.append(get) + else: + for g in get: + pieces.append('GET') + pieces.append(g) + if desc: + pieces.append('DESC') + if alpha: + pieces.append('ALPHA') + if store is not None: + pieces.append('STORE') + pieces.append(store) + return self.execute_command('SORT', *pieces) + + + #### SET COMMANDS #### + def sadd(self, name, value): + "Add ``value`` to set ``name``" + return self.execute_command('SADD', name, value) + + def scard(self, name): + "Return the number of elements in set ``name``" + return self.execute_command('SCARD', name) + + def sdiff(self, keys, *args): + "Return the difference of sets specified by ``keys``" + keys = list_or_args('sdiff', keys, args) + return self.execute_command('SDIFF', *keys) + + def sdiffstore(self, dest, keys, *args): + """ + Store the difference of sets specified by ``keys`` into a new + set named ``dest``. Returns the number of keys in the new set. + """ + keys = list_or_args('sdiffstore', keys, args) + return self.execute_command('SDIFFSTORE', dest, *keys) + + def sinter(self, keys, *args): + "Return the intersection of sets specified by ``keys``" + keys = list_or_args('sinter', keys, args) + return self.execute_command('SINTER', *keys) + + def sinterstore(self, dest, keys, *args): + """ + Store the intersection of sets specified by ``keys`` into a new + set named ``dest``. Returns the number of keys in the new set. 
+ """ + keys = list_or_args('sinterstore', keys, args) + return self.execute_command('SINTERSTORE', dest, *keys) + + def sismember(self, name, value): + "Return a boolean indicating if ``value`` is a member of set ``name``" + return self.execute_command('SISMEMBER', name, value) + + def smembers(self, name): + "Return all members of the set ``name``" + return self.execute_command('SMEMBERS', name) + + def smove(self, src, dst, value): + "Move ``value`` from set ``src`` to set ``dst`` atomically" + return self.execute_command('SMOVE', src, dst, value) + + def spop(self, name): + "Remove and return a random member of set ``name``" + return self.execute_command('SPOP', name) + + def srandmember(self, name): + "Return a random member of set ``name``" + return self.execute_command('SRANDMEMBER', name) + + def srem(self, name, value): + "Remove ``value`` from set ``name``" + return self.execute_command('SREM', name, value) + + def sunion(self, keys, *args): + "Return the union of sets specifiued by ``keys``" + keys = list_or_args('sunion', keys, args) + return self.execute_command('SUNION', *keys) + + def sunionstore(self, dest, keys, *args): + """ + Store the union of sets specified by ``keys`` into a new + set named ``dest``. Returns the number of keys in the new set. + """ + keys = list_or_args('sunionstore', keys, args) + return self.execute_command('SUNIONSTORE', dest, *keys) + + + #### SORTED SET COMMANDS #### + def zadd(self, name, value, score): + "Add member ``value`` with score ``score`` to sorted set ``name``" + return self.execute_command('ZADD', name, score, value) + + def zcard(self, name): + "Return the number of elements in the sorted set ``name``" + return self.execute_command('ZCARD', name) + + def zcount(self, name, min, max): + return self.execute_command('ZCOUNT', name, min, max) + + def zincr(self, key, member, value=1): + "This has been deprecated, use zincrby instead" + warnings.warn(DeprecationWarning( + "Redis.zincr has been deprecated, use Redis.zincrby instead" + )) + return self.zincrby(key, member, value) + + def zincrby(self, name, value, amount=1): + "Increment the score of ``value`` in sorted set ``name`` by ``amount``" + return self.execute_command('ZINCRBY', name, amount, value) + + def zinter(self, dest, keys, aggregate=None): + warnings.warn(DeprecationWarning( + "Redis.zinter has been deprecated, use Redis.zinterstore instead" + )) + return self.zinterstore(dest, keys, aggregate) + + def zinterstore(self, dest, keys, aggregate=None): + """ + Intersect multiple sorted sets specified by ``keys`` into + a new sorted set, ``dest``. Scores in the destination will be + aggregated based on the ``aggregate``, or SUM if none is provided. + """ + return self._zaggregate('ZINTERSTORE', dest, keys, aggregate) + + def zrange(self, name, start, end, desc=False, withscores=False): + """ + Return a range of values from sorted set ``name`` between + ``start`` and ``end`` sorted in ascending order. + + ``start`` and ``end`` can be negative, indicating the end of the range. + + ``desc`` indicates to sort in descending order. + + ``withscores`` indicates to return the scores along with the values. 
+ The return type is a list of (value, score) pairs + """ + if desc: + return self.zrevrange(name, start, end, withscores) + pieces = ['ZRANGE', name, start, end] + if withscores: + pieces.append('withscores') + return self.execute_command(*pieces, **{'withscores': withscores}) + + def zrangebyscore(self, name, min, max, + start=None, num=None, withscores=False): + """ + Return a range of values from the sorted set ``name`` with scores + between ``min`` and ``max``. + + If ``start`` and ``num`` are specified, then return a slice of the range. + + ``withscores`` indicates to return the scores along with the values. + The return type is a list of (value, score) pairs + """ + if (start is not None and num is None) or \ + (num is not None and start is None): + raise RedisError("``start`` and ``num`` must both be specified") + pieces = ['ZRANGEBYSCORE', name, min, max] + if start is not None and num is not None: + pieces.extend(['LIMIT', start, num]) + if withscores: + pieces.append('withscores') + return self.execute_command(*pieces, **{'withscores': withscores}) + + def zrank(self, name, value): + """ + Returns a 0-based value indicating the rank of ``value`` in sorted set + ``name`` + """ + return self.execute_command('ZRANK', name, value) + + def zrem(self, name, value): + "Remove member ``value`` from sorted set ``name``" + return self.execute_command('ZREM', name, value) + + def zremrangebyrank(self, name, min, max): + """ + Remove all elements in the sorted set ``name`` with ranks between + ``min`` and ``max``. Values are 0-based, ordered from smallest score + to largest. Values can be negative indicating the highest scores. + Returns the number of elements removed + """ + return self.execute_command('ZREMRANGEBYRANK', name, min, max) + + def zremrangebyscore(self, name, min, max): + """ + Remove all elements in the sorted set ``name`` with scores + between ``min`` and ``max``. Returns the number of elements removed. + """ + return self.execute_command('ZREMRANGEBYSCORE', name, min, max) + + def zrevrange(self, name, start, num, withscores=False): + """ + Return a range of values from sorted set ``name`` between + ``start`` and ``num`` sorted in descending order. + + ``start`` and ``num`` can be negative, indicating the end of the range. + + ``withscores`` indicates to return the scores along with the values + as a dictionary of value => score + """ + pieces = ['ZREVRANGE', name, start, num] + if withscores: + pieces.append('withscores') + return self.execute_command(*pieces, **{'withscores': withscores}) + + def zrevrank(self, name, value): + """ + Returns a 0-based value indicating the descending rank of + ``value`` in sorted set ``name`` + """ + return self.execute_command('ZREVRANK', name, value) + + def zscore(self, name, value): + "Return the score of element ``value`` in sorted set ``name``" + return self.execute_command('ZSCORE', name, value) + + def zunion(self, dest, keys, aggregate=None): + warnings.warn(DeprecationWarning( + "Redis.zunion has been deprecated, use Redis.zunionstore instead" + )) + return self.zunionstore(dest, keys, aggregate) + + def zunionstore(self, dest, keys, aggregate=None): + """ + Union multiple sorted sets specified by ``keys`` into + a new sorted set, ``dest``. Scores in the destination will be + aggregated based on the ``aggregate``, or SUM if none is provided. 
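# Illustrative sketch (not part of the patch): a tiny leaderboard on top of the
# sorted-set commands above. Note that in this client zadd takes
# (name, value, score). Key names are invented; assumes Redis on localhost:6379.
import redis

r = redis.Redis(host='localhost', port=6379, db=0)
r.delete('scores')
r.zadd('scores', 'alice', 10)
r.zadd('scores', 'bob', 25)
r.zadd('scores', 'carol', 17)
r.zincrby('scores', 'alice', amount=20)          # alice: 10 -> 30

print(r.zrevrange('scores', 0, 2, withscores=True))
# -> [('alice', 30.0), ('bob', 25.0), ('carol', 17.0)]
print(r.zrank('scores', 'carol'))                # -> 0 (ranks count up from the lowest score)
print(r.zrangebyscore('scores', 20, 30))         # -> ['bob', 'alice']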
+ """ + return self._zaggregate('ZUNIONSTORE', dest, keys, aggregate) + + def _zaggregate(self, command, dest, keys, aggregate=None): + pieces = [command, dest, len(keys)] + if isinstance(keys, dict): + items = keys.items() + keys = [i[0] for i in items] + weights = [i[1] for i in items] + else: + weights = None + pieces.extend(keys) + if weights: + pieces.append('WEIGHTS') + pieces.extend(weights) + if aggregate: + pieces.append('AGGREGATE') + pieces.append(aggregate) + return self.execute_command(*pieces) + + #### HASH COMMANDS #### + def hdel(self, name, key): + "Delete ``key`` from hash ``name``" + return self.execute_command('HDEL', name, key) + + def hexists(self, name, key): + "Returns a boolean indicating if ``key`` exists within hash ``name``" + return self.execute_command('HEXISTS', name, key) + + def hget(self, name, key): + "Return the value of ``key`` within the hash ``name``" + return self.execute_command('HGET', name, key) + + def hgetall(self, name): + "Return a Python dict of the hash's name/value pairs" + return self.execute_command('HGETALL', name) + + def hincrby(self, name, key, amount=1): + "Increment the value of ``key`` in hash ``name`` by ``amount``" + return self.execute_command('HINCRBY', name, key, amount) + + def hkeys(self, name): + "Return the list of keys within hash ``name``" + return self.execute_command('HKEYS', name) + + def hlen(self, name): + "Return the number of elements in hash ``name``" + return self.execute_command('HLEN', name) + + def hset(self, name, key, value): + """ + Set ``key`` to ``value`` within hash ``name`` + Returns 1 if HSET created a new field, otherwise 0 + """ + return self.execute_command('HSET', name, key, value) + + def hsetnx(self, name, key, value): + """ + Set ``key`` to ``value`` within hash ``name`` if ``key`` does not + exist. Returns 1 if HSETNX created a field, otherwise 0. + """ + return self.execute_command("HSETNX", name, key, value) + + def hmset(self, name, mapping): + """ + Sets each key in the ``mapping`` dict to its corresponding value + in the hash ``name`` + """ + items = [] + for pair in mapping.iteritems(): + items.extend(pair) + return self.execute_command('HMSET', name, *items) + + def hmget(self, name, keys): + "Returns a list of values ordered identically to ``keys``" + return self.execute_command('HMGET', name, *keys) + + def hvals(self, name): + "Return the list of values within hash ``name``" + return self.execute_command('HVALS', name) + + + # channels + def psubscribe(self, patterns): + "Subscribe to all channels matching any pattern in ``patterns``" + if isinstance(patterns, basestring): + patterns = [patterns] + response = self.execute_command('PSUBSCRIBE', *patterns) + # this is *after* the SUBSCRIBE in order to allow for lazy and broken + # connections that need to issue AUTH and SELECT commands + self.subscribed = True + return response + + def punsubscribe(self, patterns=[]): + """ + Unsubscribe from any channel matching any pattern in ``patterns``. + If empty, unsubscribe from all channels. 
+ """ + if isinstance(patterns, basestring): + patterns = [patterns] + return self.execute_command('PUNSUBSCRIBE', *patterns) + + def subscribe(self, channels): + "Subscribe to ``channels``, waiting for messages to be published" + if isinstance(channels, basestring): + channels = [channels] + response = self.execute_command('SUBSCRIBE', *channels) + # this is *after* the SUBSCRIBE in order to allow for lazy and broken + # connections that need to issue AUTH and SELECT commands + self.subscribed = True + return response + + def unsubscribe(self, channels=[]): + """ + Unsubscribe from ``channels``. If empty, unsubscribe + from all channels + """ + if isinstance(channels, basestring): + channels = [channels] + return self.execute_command('UNSUBSCRIBE', *channels) + + def publish(self, channel, message): + """ + Publish ``message`` on ``channel``. + Returns the number of subscribers the message was delivered to. + """ + return self.execute_command('PUBLISH', channel, message) + + def listen(self): + "Listen for messages on channels this client has been subscribed to" + while self.subscribed: + r = self.parse_response('LISTEN') + if r[0] == 'pmessage': + msg = { + 'type': r[0], + 'pattern': r[1], + 'channel': r[2], + 'data': r[3] + } + else: + msg = { + 'type': r[0], + 'pattern': None, + 'channel': r[1], + 'data': r[2] + } + if r[0] == 'unsubscribe' and r[2] == 0: + self.subscribed = False + yield msg + + +class Pipeline(Redis): + """ + Pipelines provide a way to transmit multiple commands to the Redis server + in one transmission. This is convenient for batch processing, such as + saving all the values in a list to Redis. + + All commands executed within a pipeline are wrapped with MULTI and EXEC + calls. This guarantees all commands executed in the pipeline will be + executed atomically. + + Any command raising an exception does *not* halt the execution of + subsequent commands in the pipeline. Instead, the exception is caught + and its instance is placed into the response list returned by execute(). + Code iterating over the response list should be able to deal with an + instance of an exception as a potential value. In general, these will be + ResponseError exceptions, such as those raised when issuing a command + on a key of a different datatype. + """ + def __init__(self, connection, transaction, charset, errors): + self.connection = connection + self.transaction = transaction + self.encoding = charset + self.errors = errors + self.subscribed = False # NOTE not in use, but necessary + self.reset() + + def reset(self): + self.command_stack = [] + + def _execute_command(self, command_name, command, **options): + """ + Stage a command to be executed when execute() is next called + + Returns the current Pipeline object back so commands can be + chained together, such as: + + pipe = pipe.set('foo', 'bar').incr('baz').decr('bang') + + At some other point, you can then run: pipe.execute(), + which will execute all commands queued in the pipe. + """ + # if the command_name is 'AUTH' or 'SELECT', then this command + # must have originated after a socket connection and a call to + # _setup_connection(). run these commands immediately without + # buffering them. + if command_name in ('AUTH', 'SELECT'): + return super(Pipeline, self)._execute_command( + command_name, command, **options) + else: + self.command_stack.append((command_name, command, options)) + return self + + def _execute_transaction(self, commands): + # wrap the commands in MULTI ... 
EXEC statements to indicate an + # atomic operation + all_cmds = ''.join([c for _1, c, _2 in chain( + (('', 'MULTI\r\n', ''),), + commands, + (('', 'EXEC\r\n', ''),) + )]) + self.connection.send(all_cmds, self) + # parse off the response for MULTI and all commands prior to EXEC + for i in range(len(commands)+1): + _ = self.parse_response('_') + # parse the EXEC. we want errors returned as items in the response + response = self.parse_response('_', catch_errors=True) + + if response is None: + raise WatchError("Watched variable changed.") + + if len(response) != len(commands): + raise ResponseError("Wrong number of response items from " + "pipeline execution") + # Run any callbacks for the commands run in the pipeline + data = [] + for r, cmd in zip(response, commands): + if not isinstance(r, Exception): + if cmd[0] in self.RESPONSE_CALLBACKS: + r = self.RESPONSE_CALLBACKS[cmd[0]](r, **cmd[2]) + data.append(r) + return data + + def _execute_pipeline(self, commands): + # build up all commands into a single request to increase network perf + all_cmds = ''.join([c for _1, c, _2 in commands]) + self.connection.send(all_cmds, self) + data = [] + for command_name, _, options in commands: + data.append( + self.parse_response(command_name, catch_errors=True, **options) + ) + return data + + def execute(self): + "Execute all the commands in the current pipeline" + stack = self.command_stack + self.reset() + if self.transaction: + execute = self._execute_transaction + else: + execute = self._execute_pipeline + try: + return execute(stack) + except ConnectionError: + self.connection.disconnect() + return execute(stack) + + def select(self, *args, **kwargs): + raise RedisError("Cannot select a different database from a pipeline") + + +class Lock(object): + """ + A shared, distributed Lock. Using Redis for locking allows the Lock + to be shared across processes and/or machines. + + It's left to the user to resolve deadlock issues and make sure + multiple clients play nicely together. + """ + + LOCK_FOREVER = 2**31+1 # 1 past max unix time + + def __init__(self, redis, name, timeout=None, sleep=0.1): + """ + Create a new Lock instance named ``name`` using the Redis client + supplied by ``redis``. + + ``timeout`` indicates a maximum life for the lock. + By default, it will remain locked until release() is called. + + ``sleep`` indicates the amount of time to sleep per loop iteration + when the lock is in blocking mode and another client is currently + holding the lock. + + Note: If using ``timeout``, you should make sure all the hosts + that are running clients are within the same timezone and are using + a network time service like ntp. + """ + self.redis = redis + self.name = name + self.acquired_until = None + self.timeout = timeout + self.sleep = sleep + + def __enter__(self): + return self.acquire() + + def __exit__(self, exc_type, exc_value, traceback): + self.release() + + def acquire(self, blocking=True): + """ + Use Redis to hold a shared, distributed lock named ``name``. + Returns True once the lock is acquired. + + If ``blocking`` is False, always return immediately. If the lock + was acquired, return True, otherwise return False.
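# Illustrative sketch (not part of the patch): staging commands on a Pipeline
# as described above. Calls are buffered client-side, chained off the pipeline
# object, and sent in one round trip (wrapped in MULTI/EXEC) when execute() is
# called. Key names are invented; assumes Redis on localhost:6379.
import redis

r = redis.Redis(host='localhost', port=6379, db=0)

pipe = r.pipeline()                              # transactional by default
pipe.set('counter', 0).incr('counter').incr('counter')
print(pipe.execute())                            # -> [True, 1, 2], one reply per command

# transaction=False still batches everything into one send,
# just without the MULTI/EXEC wrapper
pipe = r.pipeline(transaction=False)
pipe.get('counter').exists('counter')
print(pipe.execute())                            # -> ['2', True]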
+ """ + sleep = self.sleep + timeout = self.timeout + while 1: + unixtime = int(time.time()) + if timeout: + timeout_at = unixtime + timeout + else: + timeout_at = Lock.LOCK_FOREVER + if self.redis.setnx(self.name, timeout_at): + self.acquired_until = timeout_at + return True + # We want blocking, but didn't acquire the lock + # check to see if the current lock is expired + existing = long(self.redis.get(self.name) or 1) + if existing < unixtime: + # the previous lock is expired, attempt to overwrite it + existing = long(self.redis.getset(self.name, timeout_at) or 1) + if existing < unixtime: + # we successfully acquired the lock + self.acquired_until = timeout_at + return True + if not blocking: + return False + time.sleep(sleep) + + def release(self): + "Releases the already acquired lock" + if self.acquired_until is None: + raise ValueError("Cannot release an unlocked lock") + existing = long(self.redis.get(self.name) or 1) + # if the lock time is in the future, delete the lock + if existing >= self.acquired_until: + self.redis.delete(self.name) + self.acquired_until = None diff --git a/src/andymccurdy-redis-py-7112f5b/redis/exceptions.py b/src/andymccurdy-redis-py-7112f5b/redis/exceptions.py new file mode 100644 index 0000000..b3257ac --- /dev/null +++ b/src/andymccurdy-redis-py-7112f5b/redis/exceptions.py @@ -0,0 +1,22 @@ +"Core exceptions raised by the Redis client" + +class RedisError(Exception): + pass + +class AuthenticationError(RedisError): + pass + +class ConnectionError(RedisError): + pass + +class ResponseError(RedisError): + pass + +class InvalidResponse(RedisError): + pass + +class InvalidData(RedisError): + pass + +class WatchError(RedisError): + pass diff --git a/src/andymccurdy-redis-py-7112f5b/run_tests b/src/andymccurdy-redis-py-7112f5b/run_tests new file mode 100755 index 0000000..2d629c6 --- /dev/null +++ b/src/andymccurdy-redis-py-7112f5b/run_tests @@ -0,0 +1,9 @@ +#!/usr/bin/env python + +import unittest +from tests import all_tests + + +if __name__ == "__main__": + tests = all_tests() + results = unittest.TextTestRunner().run(tests) diff --git a/src/andymccurdy-redis-py-7112f5b/setup.py b/src/andymccurdy-redis-py-7112f5b/setup.py new file mode 100644 index 0000000..fb9ea54 --- /dev/null +++ b/src/andymccurdy-redis-py-7112f5b/setup.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python +from redis import __version__ + +sdict = { + 'name' : 'redis', + 'version' : __version__, + 'description' : 'Python client for Redis key-value store', + 'long_description' : 'Python client for Redis key-value store', + 'url': 'http://github.com/andymccurdy/redis-py', + 'download_url' : 'http://cloud.github.com/downloads/andymccurdy/redis-py/redis-%s.tar.gz' % __version__, + 'author' : 'Andy McCurdy', + 'author_email' : 'sedrik@gmail.com', + 'maintainer' : 'Andy McCurdy', + 'maintainer_email' : 'sedrik@gmail.com', + 'keywords' : ['Redis', 'key-value store'], + 'license' : 'MIT', + 'packages' : ['redis'], + 'test_suite' : 'tests.all_tests', + 'classifiers' : [ + 'Development Status :: 5 - Production/Stable', + 'Environment :: Console', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: MIT License', + 'Operating System :: OS Independent', + 'Programming Language :: Python'], +} + +try: + from setuptools import setup +except ImportError: + from distutils.core import setup + +setup(**sdict) + diff --git a/src/andymccurdy-redis-py-7112f5b/tests/__init__.py b/src/andymccurdy-redis-py-7112f5b/tests/__init__.py new file mode 100644 index 0000000..8931b07 --- /dev/null +++ 
b/src/andymccurdy-redis-py-7112f5b/tests/__init__.py @@ -0,0 +1,13 @@ +import unittest +from server_commands import ServerCommandsTestCase +from connection_pool import ConnectionPoolTestCase +from pipeline import PipelineTestCase +from lock import LockTestCase + +def all_tests(): + suite = unittest.TestSuite() + suite.addTest(unittest.makeSuite(ServerCommandsTestCase)) + suite.addTest(unittest.makeSuite(ConnectionPoolTestCase)) + suite.addTest(unittest.makeSuite(PipelineTestCase)) + suite.addTest(unittest.makeSuite(LockTestCase)) + return suite diff --git a/src/andymccurdy-redis-py-7112f5b/tests/connection_pool.py b/src/andymccurdy-redis-py-7112f5b/tests/connection_pool.py new file mode 100644 index 0000000..56f5f43 --- /dev/null +++ b/src/andymccurdy-redis-py-7112f5b/tests/connection_pool.py @@ -0,0 +1,53 @@ +import redis +import threading +import time +import unittest + +class ConnectionPoolTestCase(unittest.TestCase): + def test_multiple_connections(self): + # 2 clients to the same host/port/db/pool should use the same connection + pool = redis.ConnectionPool() + r1 = redis.Redis(host='localhost', port=6379, db=9, connection_pool=pool) + r2 = redis.Redis(host='localhost', port=6379, db=9, connection_pool=pool) + self.assertEquals(r1.connection, r2.connection) + + # if one of them switches, they should have + # separate conncetion objects + r2.select(db=10, host='localhost', port=6379) + self.assertNotEqual(r1.connection, r2.connection) + + conns = [r1.connection, r2.connection] + conns.sort() + + # but returning to the original state shares the object again + r2.select(db=9, host='localhost', port=6379) + self.assertEquals(r1.connection, r2.connection) + + # the connection manager should still have just 2 connections + mgr_conns = pool.get_all_connections() + mgr_conns.sort() + self.assertEquals(conns, mgr_conns) + + def test_threaded_workers(self): + r = redis.Redis(host='localhost', port=6379, db=9) + r.set('a', 'foo') + r.set('b', 'bar') + + def _info_worker(): + for i in range(50): + _ = r.info() + time.sleep(0.01) + + def _keys_worker(): + for i in range(50): + _ = r.keys() + time.sleep(0.01) + + t1 = threading.Thread(target=_info_worker) + t2 = threading.Thread(target=_keys_worker) + t1.start() + t2.start() + + for i in [t1, t2]: + i.join() + diff --git a/src/andymccurdy-redis-py-7112f5b/tests/lock.py b/src/andymccurdy-redis-py-7112f5b/tests/lock.py new file mode 100644 index 0000000..a6e447d --- /dev/null +++ b/src/andymccurdy-redis-py-7112f5b/tests/lock.py @@ -0,0 +1,53 @@ +from __future__ import with_statement +import redis +import time +import unittest +from redis.client import Lock + +class LockTestCase(unittest.TestCase): + def setUp(self): + self.client = redis.Redis(host='localhost', port=6379, db=9) + self.client.flushdb() + + def tearDown(self): + self.client.flushdb() + + def test_lock(self): + lock = self.client.lock('foo') + self.assert_(lock.acquire()) + self.assertEquals(self.client['foo'], str(Lock.LOCK_FOREVER)) + lock.release() + self.assertEquals(self.client['foo'], None) + + def test_competing_locks(self): + lock1 = self.client.lock('foo') + lock2 = self.client.lock('foo') + self.assert_(lock1.acquire()) + self.assertFalse(lock2.acquire(blocking=False)) + lock1.release() + self.assert_(lock2.acquire()) + self.assertFalse(lock1.acquire(blocking=False)) + lock2.release() + + def test_timeouts(self): + lock1 = self.client.lock('foo', timeout=1) + lock2 = self.client.lock('foo') + self.assert_(lock1.acquire()) + self.assertEquals(lock1.acquired_until, 
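# Illustrative sketch (not part of the patch): sharing one ConnectionPool
# between two clients, mirroring the connection-pool test above. Assumes
# Redis on localhost:6379.
import redis

pool = redis.ConnectionPool()
r1 = redis.Redis(host='localhost', port=6379, db=0, connection_pool=pool)
r2 = redis.Redis(host='localhost', port=6379, db=0, connection_pool=pool)

# the same host/port/db resolves to the same underlying connection object
print(r1.connection == r2.connection)            # -> True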
long(time.time()) + 1) + self.assertEquals(lock1.acquired_until, long(self.client['foo'])) + self.assertFalse(lock2.acquire(blocking=False)) + time.sleep(2) # need to wait up to 2 seconds for lock to timeout + self.assert_(lock2.acquire(blocking=False)) + lock2.release() + + def test_non_blocking(self): + lock1 = self.client.lock('foo') + self.assert_(lock1.acquire(blocking=False)) + self.assert_(lock1.acquired_until) + lock1.release() + self.assert_(lock1.acquired_until is None) + + def test_context_manager(self): + with self.client.lock('foo'): + self.assertEquals(self.client['foo'], str(Lock.LOCK_FOREVER)) + self.assertEquals(self.client['foo'], None) diff --git a/src/andymccurdy-redis-py-7112f5b/tests/pipeline.py b/src/andymccurdy-redis-py-7112f5b/tests/pipeline.py new file mode 100644 index 0000000..dcbfb0a --- /dev/null +++ b/src/andymccurdy-redis-py-7112f5b/tests/pipeline.py @@ -0,0 +1,61 @@ +import redis +import unittest + +class PipelineTestCase(unittest.TestCase): + def setUp(self): + self.client = redis.Redis(host='localhost', port=6379, db=9) + self.client.flushdb() + + def tearDown(self): + self.client.flushdb() + + def test_pipeline(self): + pipe = self.client.pipeline() + pipe.set('a', 'a1').get('a').zadd('z', 'z1', 1).zadd('z', 'z2', 4) + pipe.zincrby('z', 'z1').zrange('z', 0, 5, withscores=True) + self.assertEquals(pipe.execute(), + [ + True, + 'a1', + True, + True, + 2.0, + [('z1', 2.0), ('z2', 4)], + ] + ) + + def test_invalid_command_in_pipeline(self): + # all commands but the invalid one should be excuted correctly + self.client['c'] = 'a' + pipe = self.client.pipeline() + pipe.set('a', 1).set('b', 2).lpush('c', 3).set('d', 4) + result = pipe.execute() + + self.assertEquals(result[0], True) + self.assertEquals(self.client['a'], '1') + self.assertEquals(result[1], True) + self.assertEquals(self.client['b'], '2') + # we can't lpush to a key that's a string value, so this should + # be a ResponseError exception + self.assert_(isinstance(result[2], redis.ResponseError)) + self.assertEquals(self.client['c'], 'a') + self.assertEquals(result[3], True) + self.assertEquals(self.client['d'], '4') + + # make sure the pipe was restored to a working state + self.assertEquals(pipe.set('z', 'zzz').execute(), [True]) + self.assertEquals(self.client['z'], 'zzz') + + def test_pipeline_cannot_select(self): + pipe = self.client.pipeline() + self.assertRaises(redis.RedisError, + pipe.select, 'localhost', 6379, db=9) + + def test_pipeline_no_transaction(self): + pipe = self.client.pipeline(transaction=False) + pipe.set('a', 'a1').set('b', 'b1').set('c', 'c1') + self.assertEquals(pipe.execute(), [True, True, True]) + self.assertEquals(self.client['a'], 'a1') + self.assertEquals(self.client['b'], 'b1') + self.assertEquals(self.client['c'], 'c1') + diff --git a/src/andymccurdy-redis-py-7112f5b/tests/server_commands.py b/src/andymccurdy-redis-py-7112f5b/tests/server_commands.py new file mode 100644 index 0000000..5cd63c5 --- /dev/null +++ b/src/andymccurdy-redis-py-7112f5b/tests/server_commands.py @@ -0,0 +1,1260 @@ +import redis +import unittest +import datetime +import threading +import time +from distutils.version import StrictVersion + +class ServerCommandsTestCase(unittest.TestCase): + + def get_client(self): + return redis.Redis(host='localhost', port=6379, db=9) + + def setUp(self): + self.client = self.get_client() + self.client.flushdb() + + def tearDown(self): + self.client.flushdb() + for c in self.client.connection_pool.get_all_connections(): + c.disconnect() + + # GENERAL SERVER 
COMMANDS + def test_dbsize(self): + self.client['a'] = 'foo' + self.client['b'] = 'bar' + self.assertEquals(self.client.dbsize(), 2) + + def test_get_and_set(self): + # get and set can't be tested independently of each other + self.assertEquals(self.client.get('a'), None) + byte_string = 'value' + integer = 5 + unicode_string = unichr(3456) + u'abcd' + unichr(3421) + self.assert_(self.client.set('byte_string', byte_string)) + self.assert_(self.client.set('integer', 5)) + self.assert_(self.client.set('unicode_string', unicode_string)) + self.assertEquals(self.client.get('byte_string'), byte_string) + self.assertEquals(self.client.get('integer'), str(integer)) + self.assertEquals(self.client.get('unicode_string').decode('utf-8'), unicode_string) + + def test_getitem_and_setitem(self): + self.client['a'] = 'bar' + self.assertEquals(self.client['a'], 'bar') + + def test_delete(self): + self.assertEquals(self.client.delete('a'), False) + self.client['a'] = 'foo' + self.assertEquals(self.client.delete('a'), True) + + def test_delitem(self): + self.client['a'] = 'foo' + del self.client['a'] + self.assertEquals(self.client['a'], None) + + def test_config_get(self): + data = self.client.config_get() + self.assert_('maxmemory' in data) + self.assert_(data['maxmemory'].isdigit()) + + def test_config_set(self): + data = self.client.config_get() + rdbname = data['dbfilename'] + self.assert_(self.client.config_set('dbfilename', 'redis_py_test.rdb')) + self.assertEquals( + self.client.config_get()['dbfilename'], + 'redis_py_test.rdb' + ) + self.assert_(self.client.config_set('dbfilename', rdbname)) + self.assertEquals(self.client.config_get()['dbfilename'], rdbname) + + def test_info(self): + self.client['a'] = 'foo' + self.client['b'] = 'bar' + info = self.client.info() + self.assert_(isinstance(info, dict)) + self.assertEquals(info['db9']['keys'], 2) + + def test_lastsave(self): + self.assert_(isinstance(self.client.lastsave(), datetime.datetime)) + + def test_ping(self): + self.assertEquals(self.client.ping(), True) + + + # KEYS + def test_append(self): + # invalid key type + self.client.rpush('a', 'a1') + self.assertRaises(redis.ResponseError, self.client.append, 'a', 'a1') + del self.client['a'] + # real logic + self.assertEquals(self.client.append('a', 'a1'), 2) + self.assertEquals(self.client['a'], 'a1') + self.assert_(self.client.append('a', 'a2'), 4) + self.assertEquals(self.client['a'], 'a1a2') + + def test_decr(self): + self.assertEquals(self.client.decr('a'), -1) + self.assertEquals(self.client['a'], '-1') + self.assertEquals(self.client.decr('a'), -2) + self.assertEquals(self.client['a'], '-2') + self.assertEquals(self.client.decr('a', amount=5), -7) + self.assertEquals(self.client['a'], '-7') + + def test_exists(self): + self.assertEquals(self.client.exists('a'), False) + self.client['a'] = 'foo' + self.assertEquals(self.client.exists('a'), True) + + def test_expire(self): + self.assertEquals(self.client.expire('a', 10), False) + self.client['a'] = 'foo' + self.assertEquals(self.client.expire('a', 10), True) + self.assertEquals(self.client.ttl('a'), 10) + self.assertEquals(self.client.persist('a'), True) + self.assertEquals(self.client.ttl('a'), None) + + def test_expireat(self): + expire_at = datetime.datetime.now() + datetime.timedelta(minutes=1) + self.assertEquals(self.client.expireat('a', expire_at), False) + self.client['a'] = 'foo' + # expire at in unix time + expire_at_seconds = int(time.mktime(expire_at.timetuple())) + self.assertEquals(self.client.expireat('a', expire_at_seconds), 
True) + self.assertEquals(self.client.ttl('a'), 60) + # expire at given a datetime object + self.client['b'] = 'bar' + self.assertEquals(self.client.expireat('b', expire_at), True) + self.assertEquals(self.client.ttl('b'), 60) + + def test_get_set_bit(self): + self.assertEquals(self.client.getbit('a', 5), False) + self.assertEquals(self.client.setbit('a', 5, True), False) + self.assertEquals(self.client.getbit('a', 5), True) + self.assertEquals(self.client.setbit('a', 4, False), False) + self.assertEquals(self.client.getbit('a', 4), False) + self.assertEquals(self.client.setbit('a', 4, True), False) + self.assertEquals(self.client.setbit('a', 5, True), True) + self.assertEquals(self.client.getbit('a', 4), True) + self.assertEquals(self.client.getbit('a', 5), True) + + def test_getset(self): + self.assertEquals(self.client.getset('a', 'foo'), None) + self.assertEquals(self.client.getset('a', 'bar'), 'foo') + + def test_incr(self): + self.assertEquals(self.client.incr('a'), 1) + self.assertEquals(self.client['a'], '1') + self.assertEquals(self.client.incr('a'), 2) + self.assertEquals(self.client['a'], '2') + self.assertEquals(self.client.incr('a', amount=5), 7) + self.assertEquals(self.client['a'], '7') + + def test_keys(self): + self.assertEquals(self.client.keys(), []) + keys = set(['test_a', 'test_b', 'testc']) + for key in keys: + self.client[key] = 1 + self.assertEquals(set(self.client.keys(pattern='test_*')), + keys - set(['testc'])) + self.assertEquals(set(self.client.keys(pattern='test*')), keys) + + def test_mget(self): + self.assertEquals(self.client.mget(['a', 'b']), [None, None]) + self.client['a'] = '1' + self.client['b'] = '2' + self.client['c'] = '3' + self.assertEquals(self.client.mget(['a', 'other', 'b', 'c']), + ['1', None, '2', '3']) + + def test_mset(self): + d = {'a': '1', 'b': '2', 'c': '3'} + self.assert_(self.client.mset(d)) + for k,v in d.iteritems(): + self.assertEquals(self.client[k], v) + + def test_msetnx(self): + d = {'a': '1', 'b': '2', 'c': '3'} + self.assert_(self.client.msetnx(d)) + d2 = {'a': 'x', 'd': '4'} + self.assert_(not self.client.msetnx(d2)) + for k,v in d.iteritems(): + self.assertEquals(self.client[k], v) + self.assertEquals(self.client['d'], None) + + def test_randomkey(self): + self.assertEquals(self.client.randomkey(), None) + self.client['a'] = '1' + self.client['b'] = '2' + self.client['c'] = '3' + self.assert_(self.client.randomkey() in ('a', 'b', 'c')) + + def test_rename(self): + self.client['a'] = '1' + self.assert_(self.client.rename('a', 'b')) + self.assertEquals(self.client['a'], None) + self.assertEquals(self.client['b'], '1') + + def test_renamenx(self): + self.client['a'] = '1' + self.client['b'] = '2' + self.assert_(not self.client.renamenx('a', 'b')) + self.assertEquals(self.client['a'], '1') + self.assertEquals(self.client['b'], '2') + + def test_setex(self): + self.assertEquals(self.client.setex('a', '1', 60), True) + self.assertEquals(self.client['a'], '1') + self.assertEquals(self.client.ttl('a'), 60 ) + + def test_setnx(self): + self.assert_(self.client.setnx('a', '1')) + self.assertEquals(self.client['a'], '1') + self.assert_(not self.client.setnx('a', '2')) + self.assertEquals(self.client['a'], '1') + + def test_setrange(self): + self.assertEquals(self.client.setrange('a', 5, 'abcdef'), 11) + self.assertEquals(self.client['a'], '\0\0\0\0\0abcdef') + self.client['a'] = 'Hello World' + self.assertEquals(self.client.setrange('a', 6, 'Redis'), 11) + self.assertEquals(self.client['a'], 'Hello Redis') + + def test_strlen(self): + 
self.client['a'] = 'abcdef' + self.assertEquals(self.client.strlen('a'), 6) + + def test_substr(self): + # invalid key type + self.client.rpush('a', 'a1') + self.assertRaises(redis.ResponseError, self.client.substr, 'a', 0) + del self.client['a'] + # real logic + self.client['a'] = 'abcdefghi' + self.assertEquals(self.client.substr('a', 0), 'abcdefghi') + self.assertEquals(self.client.substr('a', 2), 'cdefghi') + self.assertEquals(self.client.substr('a', 3, 5), 'def') + self.assertEquals(self.client.substr('a', 3, -2), 'defgh') + self.client['a'] = 123456 # does substr work with ints? + self.assertEquals(self.client.substr('a', 2, -2), '345') + + def test_type(self): + self.assertEquals(self.client.type('a'), 'none') + self.client['a'] = '1' + self.assertEquals(self.client.type('a'), 'string') + del self.client['a'] + self.client.lpush('a', '1') + self.assertEquals(self.client.type('a'), 'list') + del self.client['a'] + self.client.sadd('a', '1') + self.assertEquals(self.client.type('a'), 'set') + del self.client['a'] + self.client.zadd('a', '1', 1) + self.assertEquals(self.client.type('a'), 'zset') + + def test_watch(self): + self.client.set("a", 1) + + self.client.watch("a") + pipeline = self.client.pipeline() + pipeline.set("a", 2) + self.assertEquals(pipeline.execute(), [True]) + + self.client.set("b", 1) + self.client.watch("b") + self.get_client().set("b", 2) + pipeline = self.client.pipeline() + pipeline.set("b", 3) + + self.assertRaises(redis.exceptions.WatchError, pipeline.execute) + + def test_unwatch(self): + self.assertEquals(self.client.unwatch(), True) + + # LISTS + def make_list(self, name, l): + for i in l: + self.client.rpush(name, i) + + def test_blpop(self): + self.make_list('a', 'ab') + self.make_list('b', 'cd') + self.assertEquals(self.client.blpop(['b', 'a'], timeout=1), ('b', 'c')) + self.assertEquals(self.client.blpop(['b', 'a'], timeout=1), ('b', 'd')) + self.assertEquals(self.client.blpop(['b', 'a'], timeout=1), ('a', 'a')) + self.assertEquals(self.client.blpop(['b', 'a'], timeout=1), ('a', 'b')) + self.assertEquals(self.client.blpop(['b', 'a'], timeout=1), None) + self.make_list('c', 'a') + self.assertEquals(self.client.blpop('c', timeout=1), ('c', 'a')) + + def test_brpop(self): + self.make_list('a', 'ab') + self.make_list('b', 'cd') + self.assertEquals(self.client.brpop(['b', 'a'], timeout=1), ('b', 'd')) + self.assertEquals(self.client.brpop(['b', 'a'], timeout=1), ('b', 'c')) + self.assertEquals(self.client.brpop(['b', 'a'], timeout=1), ('a', 'b')) + self.assertEquals(self.client.brpop(['b', 'a'], timeout=1), ('a', 'a')) + self.assertEquals(self.client.brpop(['b', 'a'], timeout=1), None) + self.make_list('c', 'a') + self.assertEquals(self.client.brpop('c', timeout=1), ('c', 'a')) + + def test_brpoplpush(self): + self.make_list('a', '12') + self.make_list('b', '34') + self.assertEquals(self.client.brpoplpush('a', 'b'), '2') + self.assertEquals(self.client.brpoplpush('a', 'b'), '1') + self.assertEquals(self.client.brpoplpush('a', 'b', timeout=1), None) + self.assertEquals(self.client.lrange('a', 0, -1), []) + self.assertEquals(self.client.lrange('b', 0, -1), ['1', '2', '3', '4']) + + def test_lindex(self): + # no key + self.assertEquals(self.client.lindex('a', '0'), None) + # key is not a list + self.client['a'] = 'b' + self.assertRaises(redis.ResponseError, self.client.lindex, 'a', '0') + del self.client['a'] + # real logic + self.make_list('a', 'abc') + self.assertEquals(self.client.lindex('a', '0'), 'a') + self.assertEquals(self.client.lindex('a', '1'), 'b') 
+ self.assertEquals(self.client.lindex('a', '2'), 'c') + + def test_linsert(self): + # no key + self.assertEquals(self.client.linsert('a', 'after', 'x', 'y'), 0) + # key is not a list + self.client['a'] = 'b' + self.assertRaises( + redis.ResponseError, self.client.linsert, 'a', 'after', 'x', 'y' + ) + del self.client['a'] + # real logic + self.make_list('a', 'abc') + self.assertEquals(self.client.linsert('a', 'after', 'b', 'b1'), 4) + self.assertEquals(self.client.lrange('a', 0, -1), + ['a', 'b', 'b1', 'c']) + self.assertEquals(self.client.linsert('a', 'before', 'b', 'a1'), 5) + self.assertEquals(self.client.lrange('a', 0, -1), + ['a', 'a1', 'b', 'b1', 'c']) + + def test_llen(self): + # no key + self.assertEquals(self.client.llen('a'), 0) + # key is not a list + self.client['a'] = 'b' + self.assertRaises(redis.ResponseError, self.client.llen, 'a') + del self.client['a'] + # real logic + self.make_list('a', 'abc') + self.assertEquals(self.client.llen('a'), 3) + + def test_lpop(self): + # no key + self.assertEquals(self.client.lpop('a'), None) + # key is not a list + self.client['a'] = 'b' + self.assertRaises(redis.ResponseError, self.client.lpop, 'a') + del self.client['a'] + # real logic + self.make_list('a', 'abc') + self.assertEquals(self.client.lpop('a'), 'a') + self.assertEquals(self.client.lpop('a'), 'b') + self.assertEquals(self.client.lpop('a'), 'c') + self.assertEquals(self.client.lpop('a'), None) + + def test_lpush(self): + # key is not a list + self.client['a'] = 'b' + self.assertRaises(redis.ResponseError, self.client.lpush, 'a', 'a') + del self.client['a'] + # real logic + version = self.client.info()['redis_version'] + if StrictVersion(version) >= StrictVersion('1.3.4'): + self.assertEqual(1, self.client.lpush('a', 'b')) + self.assertEqual(2, self.client.lpush('a', 'a')) + else: + self.assert_(self.client.lpush('a', 'b')) + self.assert_(self.client.lpush('a', 'a')) + self.assertEquals(self.client.lindex('a', 0), 'a') + self.assertEquals(self.client.lindex('a', 1), 'b') + + def test_lpushx(self): + # key is not a list + self.client['a'] = 'b' + self.assertRaises(redis.ResponseError, self.client.lpushx, 'a', 'a') + del self.client['a'] + # real logic + self.assertEquals(self.client.lpushx('a', 'b'), 0) + self.assertEquals(self.client.lrange('a', 0, -1), []) + self.make_list('a', 'abc') + self.assertEquals(self.client.lpushx('a', 'd'), 4) + self.assertEquals(self.client.lrange('a', 0, -1), ['d', 'a', 'b', 'c']) + + def test_lrange(self): + # no key + self.assertEquals(self.client.lrange('a', 0, 1), []) + # key is not a list + self.client['a'] = 'b' + self.assertRaises(redis.ResponseError, self.client.lrange, 'a', 0, 1) + del self.client['a'] + # real logic + self.make_list('a', 'abcde') + self.assertEquals(self.client.lrange('a', 0, 2), ['a', 'b', 'c']) + self.assertEquals(self.client.lrange('a', 2, 10), ['c', 'd', 'e']) + + def test_lrem(self): + # no key + self.assertEquals(self.client.lrem('a', 'foo'), 0) + # key is not a list + self.client['a'] = 'b' + self.assertRaises(redis.ResponseError, self.client.lrem, 'a', 'b') + del self.client['a'] + # real logic + self.make_list('a', 'aaaa') + self.assertEquals(self.client.lrem('a', 'a', 1), 1) + self.assertEquals(self.client.lrange('a', 0, 3), ['a', 'a', 'a']) + self.assertEquals(self.client.lrem('a', 'a'), 3) + # remove all the elements in the list means the key is deleted + self.assertEquals(self.client.lrange('a', 0, 1), []) + + def test_lset(self): + # no key + self.assertRaises(redis.ResponseError, self.client.lset, 'a', 1, 
'b') + # key is not a list + self.client['a'] = 'b' + self.assertRaises(redis.ResponseError, self.client.lset, 'a', 1, 'b') + del self.client['a'] + # real logic + self.make_list('a', 'abc') + self.assertEquals(self.client.lrange('a', 0, 2), ['a', 'b', 'c']) + self.assert_(self.client.lset('a', 1, 'd')) + self.assertEquals(self.client.lrange('a', 0, 2), ['a', 'd', 'c']) + + def test_ltrim(self): + # no key -- TODO: Not sure why this is actually true. + self.assert_(self.client.ltrim('a', 0, 2)) + # key is not a list + self.client['a'] = 'b' + self.assertRaises(redis.ResponseError, self.client.ltrim, 'a', 0, 2) + del self.client['a'] + # real logic + self.make_list('a', 'abc') + self.assert_(self.client.ltrim('a', 0, 1)) + self.assertEquals(self.client.lrange('a', 0, 5), ['a', 'b']) + + def test_lpop(self): + # no key + self.assertEquals(self.client.lpop('a'), None) + # key is not a list + self.client['a'] = 'b' + self.assertRaises(redis.ResponseError, self.client.lpop, 'a') + del self.client['a'] + # real logic + self.make_list('a', 'abc') + self.assertEquals(self.client.lpop('a'), 'a') + self.assertEquals(self.client.lpop('a'), 'b') + self.assertEquals(self.client.lpop('a'), 'c') + self.assertEquals(self.client.lpop('a'), None) + + def test_rpop(self): + # no key + self.assertEquals(self.client.rpop('a'), None) + # key is not a list + self.client['a'] = 'b' + self.assertRaises(redis.ResponseError, self.client.rpop, 'a') + del self.client['a'] + # real logic + self.make_list('a', 'abc') + self.assertEquals(self.client.rpop('a'), 'c') + self.assertEquals(self.client.rpop('a'), 'b') + self.assertEquals(self.client.rpop('a'), 'a') + self.assertEquals(self.client.rpop('a'), None) + + def test_rpoplpush(self): + # no src key + self.make_list('b', ['b1']) + self.assertEquals(self.client.rpoplpush('a', 'b'), None) + # no dest key + self.assertEquals(self.client.rpoplpush('b', 'a'), 'b1') + self.assertEquals(self.client.lindex('a', 0), 'b1') + del self.client['a'] + del self.client['b'] + # src key is not a list + self.client['a'] = 'a1' + self.assertRaises(redis.ResponseError, self.client.rpoplpush, 'a', 'b') + del self.client['a'] + # dest key is not a list + self.make_list('a', ['a1']) + self.client['b'] = 'b' + self.assertRaises(redis.ResponseError, self.client.rpoplpush, 'a', 'b') + del self.client['a'] + del self.client['b'] + # real logic + self.make_list('a', ['a1', 'a2', 'a3']) + self.make_list('b', ['b1', 'b2', 'b3']) + self.assertEquals(self.client.rpoplpush('a', 'b'), 'a3') + self.assertEquals(self.client.lrange('a', 0, 2), ['a1', 'a2']) + self.assertEquals(self.client.lrange('b', 0, 4), + ['a3', 'b1', 'b2', 'b3']) + + def test_rpush(self): + # key is not a list + self.client['a'] = 'b' + self.assertRaises(redis.ResponseError, self.client.rpush, 'a', 'a') + del self.client['a'] + # real logic + version = self.client.info()['redis_version'] + if StrictVersion(version) >= StrictVersion('1.3.4'): + self.assertEqual(1, self.client.rpush('a', 'a')) + self.assertEqual(2, self.client.rpush('a', 'b')) + else: + self.assert_(self.client.rpush('a', 'a')) + self.assert_(self.client.rpush('a', 'b')) + self.assertEquals(self.client.lindex('a', 0), 'a') + self.assertEquals(self.client.lindex('a', 1), 'b') + + def test_rpushx(self): + # key is not a list + self.client['a'] = 'b' + self.assertRaises(redis.ResponseError, self.client.rpushx, 'a', 'a') + del self.client['a'] + # real logic + self.assertEquals(self.client.rpushx('a', 'b'), 0) + self.assertEquals(self.client.lrange('a', 0, -1), []) + 
self.make_list('a', 'abc') + self.assertEquals(self.client.rpushx('a', 'd'), 4) + self.assertEquals(self.client.lrange('a', 0, -1), ['a', 'b', 'c', 'd']) + + # Set commands + def make_set(self, name, l): + for i in l: + self.client.sadd(name, i) + + def test_sadd(self): + # key is not a set + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.sadd, 'a', 'a1') + del self.client['a'] + # real logic + members = set(['a1', 'a2', 'a3']) + self.make_set('a', members) + self.assertEquals(self.client.smembers('a'), members) + + def test_scard(self): + # key is not a set + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.scard, 'a') + del self.client['a'] + # real logic + self.make_set('a', 'abc') + self.assertEquals(self.client.scard('a'), 3) + + def test_sdiff(self): + # some key is not a set + self.make_set('a', ['a1', 'a2', 'a3']) + self.client['b'] = 'b' + self.assertRaises(redis.ResponseError, self.client.sdiff, ['a', 'b']) + del self.client['b'] + # real logic + self.make_set('b', ['b1', 'a2', 'b3']) + self.assertEquals(self.client.sdiff(['a', 'b']), set(['a1', 'a3'])) + + def test_sdiffstore(self): + # some key is not a set + self.make_set('a', ['a1', 'a2', 'a3']) + self.client['b'] = 'b' + self.assertRaises(redis.ResponseError, self.client.sdiffstore, + 'c', ['a', 'b']) + del self.client['b'] + self.make_set('b', ['b1', 'a2', 'b3']) + # dest key always gets overwritten, even if it's not a set, so don't + # test for that + # real logic + self.assertEquals(self.client.sdiffstore('c', ['a', 'b']), 2) + self.assertEquals(self.client.smembers('c'), set(['a1', 'a3'])) + + def test_sinter(self): + # some key is not a set + self.make_set('a', ['a1', 'a2', 'a3']) + self.client['b'] = 'b' + self.assertRaises(redis.ResponseError, self.client.sinter, ['a', 'b']) + del self.client['b'] + # real logic + self.make_set('b', ['a1', 'b2', 'a3']) + self.assertEquals(self.client.sinter(['a', 'b']), set(['a1', 'a3'])) + + def test_sinterstore(self): + # some key is not a set + self.make_set('a', ['a1', 'a2', 'a3']) + self.client['b'] = 'b' + self.assertRaises(redis.ResponseError, self.client.sinterstore, + 'c', ['a', 'b']) + del self.client['b'] + self.make_set('b', ['a1', 'b2', 'a3']) + # dest key always gets overwritten, even if it's not a set, so don't + # test for that + # real logic + self.assertEquals(self.client.sinterstore('c', ['a', 'b']), 2) + self.assertEquals(self.client.smembers('c'), set(['a1', 'a3'])) + + def test_sismember(self): + # key is not a set + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.sismember, 'a', 'a') + del self.client['a'] + # real logic + self.make_set('a', 'abc') + self.assertEquals(self.client.sismember('a', 'a'), True) + self.assertEquals(self.client.sismember('a', 'b'), True) + self.assertEquals(self.client.sismember('a', 'c'), True) + self.assertEquals(self.client.sismember('a', 'd'), False) + + def test_smembers(self): + # key is not a set + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.smembers, 'a') + del self.client['a'] + # set doesn't exist + self.assertEquals(self.client.smembers('a'), set()) + # real logic + self.make_set('a', 'abc') + self.assertEquals(self.client.smembers('a'), set(['a', 'b', 'c'])) + + def test_smove(self): + # src key is not set + self.make_set('b', ['b1', 'b2']) + self.assertEquals(self.client.smove('a', 'b', 'a1'), 0) + # src key is not a set + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.smove, + 
'a', 'b', 'a1') + del self.client['a'] + self.make_set('a', ['a1', 'a2']) + # dest key is not a set + del self.client['b'] + self.client['b'] = 'b' + self.assertRaises(redis.ResponseError, self.client.smove, + 'a', 'b', 'a1') + del self.client['b'] + self.make_set('b', ['b1', 'b2']) + # real logic + self.assert_(self.client.smove('a', 'b', 'a1')) + self.assertEquals(self.client.smembers('a'), set(['a2'])) + self.assertEquals(self.client.smembers('b'), set(['b1', 'b2', 'a1'])) + + def test_spop(self): + # key is not set + self.assertEquals(self.client.spop('a'), None) + # key is not a set + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.spop, 'a') + del self.client['a'] + # real logic + self.make_set('a', 'abc') + value = self.client.spop('a') + self.assert_(value in 'abc') + self.assertEquals(self.client.smembers('a'), set('abc') - set(value)) + + def test_srandmember(self): + # key is not set + self.assertEquals(self.client.srandmember('a'), None) + # key is not a set + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.srandmember, 'a') + del self.client['a'] + # real logic + self.make_set('a', 'abc') + self.assert_(self.client.srandmember('a') in 'abc') + + def test_srem(self): + # key is not set + self.assertEquals(self.client.srem('a', 'a'), False) + # key is not a set + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.srem, 'a', 'a') + del self.client['a'] + # real logic + self.make_set('a', 'abc') + self.assertEquals(self.client.srem('a', 'd'), False) + self.assertEquals(self.client.srem('a', 'b'), True) + self.assertEquals(self.client.smembers('a'), set('ac')) + + def test_sunion(self): + # some key is not a set + self.make_set('a', ['a1', 'a2', 'a3']) + self.client['b'] = 'b' + self.assertRaises(redis.ResponseError, self.client.sunion, ['a', 'b']) + del self.client['b'] + # real logic + self.make_set('b', ['a1', 'b2', 'a3']) + self.assertEquals(self.client.sunion(['a', 'b']), + set(['a1', 'a2', 'a3', 'b2'])) + + def test_sunionstore(self): + # some key is not a set + self.make_set('a', ['a1', 'a2', 'a3']) + self.client['b'] = 'b' + self.assertRaises(redis.ResponseError, self.client.sunionstore, + 'c', ['a', 'b']) + del self.client['b'] + self.make_set('b', ['a1', 'b2', 'a3']) + # dest key always gets overwritten, even if it's not a set, so don't + # test for that + # real logic + self.assertEquals(self.client.sunionstore('c', ['a', 'b']), 4) + self.assertEquals(self.client.smembers('c'), + set(['a1', 'a2', 'a3', 'b2'])) + + # SORTED SETS + def make_zset(self, name, d): + for k,v in d.items(): + self.client.zadd(name, k, v) + + def test_zadd(self): + self.make_zset('a', {'a1': 1, 'a2': 2, 'a3': 3}) + self.assertEquals(self.client.zrange('a', 0, 3), ['a1', 'a2', 'a3']) + + def test_zcard(self): + # key is not a zset + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.zcard, 'a') + del self.client['a'] + # real logic + self.make_zset('a', {'a1': 1, 'a2': 2, 'a3': 3}) + self.assertEquals(self.client.zcard('a'), 3) + + def test_zcount(self): + # key is not a zset + self.client['a'] = 'a' + self.assertRaises(redis.ResponseError, self.client.zcount, 'a', 0, 0) + del self.client['a'] + # real logic + self.make_zset('a', {'a1': 1, 'a2': 2, 'a3': 3}) + self.assertEquals(self.client.zcount('a', '-inf', '+inf'), 3) + self.assertEquals(self.client.zcount('a', 1, 2), 2) + self.assertEquals(self.client.zcount('a', 10, 20), 0) + + def test_zincrby(self): + # key is not a zset + 
+        self.client['a'] = 'a'
+        self.assertRaises(redis.ResponseError, self.client.zincrby, 'a', 'a1')
+        del self.client['a']
+        # real logic
+        self.make_zset('a', {'a1': 1, 'a2': 2, 'a3': 3})
+        self.assertEquals(self.client.zincrby('a', 'a2'), 3.0)
+        self.assertEquals(self.client.zincrby('a', 'a3', amount=5), 8.0)
+        self.assertEquals(self.client.zscore('a', 'a2'), 3.0)
+        self.assertEquals(self.client.zscore('a', 'a3'), 8.0)
+
+    def test_zinterstore(self):
+        self.make_zset('a', {'a1': 1, 'a2': 1, 'a3': 1})
+        self.make_zset('b', {'a1': 2, 'a3': 2, 'a4': 2})
+        self.make_zset('c', {'a1': 6, 'a3': 5, 'a4': 4})
+
+        # sum, no weight
+        self.assert_(self.client.zinterstore('z', ['a', 'b', 'c']))
+        self.assertEquals(
+            self.client.zrange('z', 0, -1, withscores=True),
+            [('a3', 8), ('a1', 9)]
+        )
+
+        # max, no weight
+        self.assert_(
+            self.client.zinterstore('z', ['a', 'b', 'c'], aggregate='MAX')
+        )
+        self.assertEquals(
+            self.client.zrange('z', 0, -1, withscores=True),
+            [('a3', 5), ('a1', 6)]
+        )
+
+        # with weight
+        self.assert_(self.client.zinterstore('z', {'a': 1, 'b': 2, 'c': 3}))
+        self.assertEquals(
+            self.client.zrange('z', 0, -1, withscores=True),
+            [('a3', 20), ('a1', 23)]
+        )
+
+
+    def test_zrange(self):
+        # key is not a zset
+        self.client['a'] = 'a'
+        self.assertRaises(redis.ResponseError, self.client.zrange, 'a', 0, 1)
+        del self.client['a']
+        # real logic
+        self.make_zset('a', {'a1': 1, 'a2': 2, 'a3': 3})
+        self.assertEquals(self.client.zrange('a', 0, 1), ['a1', 'a2'])
+        self.assertEquals(self.client.zrange('a', 1, 2), ['a2', 'a3'])
+        self.assertEquals(self.client.zrange('a', 0, 1, withscores=True),
+            [('a1', 1.0), ('a2', 2.0)])
+        self.assertEquals(self.client.zrange('a', 1, 2, withscores=True),
+            [('a2', 2.0), ('a3', 3.0)])
+        # a non existant key should return empty list
+        self.assertEquals(self.client.zrange('b', 0, 1, withscores=True), [])
+
+
+    def test_zrangebyscore(self):
+        # key is not a zset
+        self.client['a'] = 'a'
+        self.assertRaises(redis.ResponseError, self.client.zrangebyscore,
+            'a', 0, 1)
+        del self.client['a']
+        # real logic
+        self.make_zset('a', {'a1': 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5})
+        self.assertEquals(self.client.zrangebyscore('a', 2, 4),
+            ['a2', 'a3', 'a4'])
+        self.assertEquals(self.client.zrangebyscore('a', 2, 4, start=1, num=2),
+            ['a3', 'a4'])
+        self.assertEquals(self.client.zrangebyscore('a', 2, 4, withscores=True),
+            [('a2', 2.0), ('a3', 3.0), ('a4', 4.0)])
+        # a non existant key should return empty list
+        self.assertEquals(self.client.zrangebyscore('b', 0, 1, withscores=True), [])
+
+    def test_zrank(self):
+        # key is not a zset
+        self.client['a'] = 'a'
+        self.assertRaises(redis.ResponseError, self.client.zrank, 'a', 'a4')
+        del self.client['a']
+        # real logic
+        self.make_zset('a', {'a1': 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5})
+        self.assertEquals(self.client.zrank('a', 'a1'), 0)
+        self.assertEquals(self.client.zrank('a', 'a2'), 1)
+        self.assertEquals(self.client.zrank('a', 'a3'), 2)
+        self.assertEquals(self.client.zrank('a', 'a4'), 3)
+        self.assertEquals(self.client.zrank('a', 'a5'), 4)
+        # non-existent value in zset
+        self.assertEquals(self.client.zrank('a', 'a6'), None)
+
+    def test_zrem(self):
+        # key is not a zset
+        self.client['a'] = 'a'
+        self.assertRaises(redis.ResponseError, self.client.zrem, 'a', 'a1')
+        del self.client['a']
+        # real logic
+        self.make_zset('a', {'a1': 1, 'a2': 2, 'a3': 3})
+        self.assertEquals(self.client.zrem('a', 'a2'), True)
+        self.assertEquals(self.client.zrange('a', 0, 5), ['a1', 'a3'])
+        self.assertEquals(self.client.zrem('a', 'b'), False)
+        self.assertEquals(self.client.zrange('a', 0, 5), ['a1', 'a3'])
+
+    def test_zremrangebyrank(self):
+        # key is not a zset
+        self.client['a'] = 'a'
+        self.assertRaises(redis.ResponseError, self.client.zremrangebyscore,
+            'a', 0, 1)
+        del self.client['a']
+        # real logic
+        self.make_zset('a', {'a1': 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5})
+        self.assertEquals(self.client.zremrangebyrank('a', 1, 3), 3)
+        self.assertEquals(self.client.zrange('a', 0, 5), ['a1', 'a5'])
+
+    def test_zremrangebyscore(self):
+        # key is not a zset
+        self.client['a'] = 'a'
+        self.assertRaises(redis.ResponseError, self.client.zremrangebyscore,
+            'a', 0, 1)
+        del self.client['a']
+        # real logic
+        self.make_zset('a', {'a1': 1, 'a2': 2, 'a3': 3, 'a4': 4, 'a5': 5})
+        self.assertEquals(self.client.zremrangebyscore('a', 2, 4), 3)
+        self.assertEquals(self.client.zrange('a', 0, 5), ['a1', 'a5'])
+        self.assertEquals(self.client.zremrangebyscore('a', 2, 4), 0)
+        self.assertEquals(self.client.zrange('a', 0, 5), ['a1', 'a5'])
+
+    def test_zrevrange(self):
+        # key is not a zset
+        self.client['a'] = 'a'
+        self.assertRaises(redis.ResponseError, self.client.zrevrange,
+            'a', 0, 1)
+        del self.client['a']
+        # real logic
+        self.make_zset('a', {'a1': 1, 'a2': 2, 'a3': 3})
+        self.assertEquals(self.client.zrevrange('a', 0, 1), ['a3', 'a2'])
+        self.assertEquals(self.client.zrevrange('a', 1, 2), ['a2', 'a1'])
+        self.assertEquals(self.client.zrevrange('a', 0, 1, withscores=True),
+            [('a3', 3.0), ('a2', 2.0)])
+        self.assertEquals(self.client.zrevrange('a', 1, 2, withscores=True),
+            [('a2', 2.0), ('a1', 1.0)])
+        # a non existant key should return empty list
+        self.assertEquals(self.client.zrange('b', 0, 1, withscores=True), [])
+
+    def test_zrevrank(self):
+        # key is not a zset
+        self.client['a'] = 'a'
+        self.assertRaises(redis.ResponseError, self.client.zrevrank, 'a', 'a4')
+        del self.client['a']
+        # real logic
+        self.make_zset('a', {'a1': 5, 'a2': 4, 'a3': 3, 'a4': 2, 'a5': 1})
+        self.assertEquals(self.client.zrevrank('a', 'a1'), 0)
+        self.assertEquals(self.client.zrevrank('a', 'a2'), 1)
+        self.assertEquals(self.client.zrevrank('a', 'a3'), 2)
+        self.assertEquals(self.client.zrevrank('a', 'a4'), 3)
+        self.assertEquals(self.client.zrevrank('a', 'a5'), 4)
+        self.assertEquals(self.client.zrevrank('a', 'b'), None)
+
+    def test_zscore(self):
+        # key is not a zset
+        self.client['a'] = 'a'
+        self.assertRaises(redis.ResponseError, self.client.zscore, 'a', 'a1')
+        del self.client['a']
+        # real logic
+        self.make_zset('a', {'a1': 0, 'a2': 1, 'a3': 2})
+        self.assertEquals(self.client.zscore('a', 'a1'), 0.0)
+        self.assertEquals(self.client.zscore('a', 'a2'), 1.0)
+        # test a non-existant member
+        self.assertEquals(self.client.zscore('a', 'a4'), None)
+
+    def test_zunionstore(self):
+        self.make_zset('a', {'a1': 1, 'a2': 1, 'a3': 1})
+        self.make_zset('b', {'a1': 2, 'a3': 2, 'a4': 2})
+        self.make_zset('c', {'a1': 6, 'a4': 5, 'a5': 4})
+
+        # sum, no weight
+        self.assert_(self.client.zunionstore('z', ['a', 'b', 'c']))
+        self.assertEquals(
+            self.client.zrange('z', 0, -1, withscores=True),
+            [('a2', 1), ('a3', 3), ('a5', 4), ('a4', 7), ('a1', 9)]
+        )
+
+        # max, no weight
+        self.assert_(
+            self.client.zunionstore('z', ['a', 'b', 'c'], aggregate='MAX')
+        )
+        self.assertEquals(
+            self.client.zrange('z', 0, -1, withscores=True),
+            [('a2', 1), ('a3', 2), ('a5', 4), ('a4', 5), ('a1', 6)]
+        )
+
+        # with weight
+        self.assert_(self.client.zunionstore('z', {'a': 1, 'b': 2, 'c': 3}))
+        self.assertEquals(
+            self.client.zrange('z', 0, -1, withscores=True),
+            [('a2', 1), ('a3', 5), ('a5', 12), ('a4', 19), ('a1', 23)]
+        )
+
+
+    # HASHES
+    def make_hash(self, key, d):
+        for k,v in d.iteritems():
+            self.client.hset(key, k, v)
+
+    def test_hget_and_hset(self):
+        # key is not a hash
+        self.client['a'] = 'a'
+        self.assertRaises(redis.ResponseError, self.client.hget, 'a', 'a1')
+        del self.client['a']
+        # no key
+        self.assertEquals(self.client.hget('a', 'a1'), None)
+        # real logic
+        self.make_hash('a', {'a1': 1, 'a2': 2, 'a3': 3})
+        self.assertEquals(self.client.hget('a', 'a1'), '1')
+        self.assertEquals(self.client.hget('a', 'a2'), '2')
+        self.assertEquals(self.client.hget('a', 'a3'), '3')
+        self.assertEquals(self.client.hset('a', 'a2', 5), 0)
+        self.assertEquals(self.client.hget('a', 'a2'), '5')
+        self.assertEquals(self.client.hset('a', 'a4', 4), 1)
+        self.assertEquals(self.client.hget('a', 'a4'), '4')
+        # key inside of hash that doesn't exist returns null value
+        self.assertEquals(self.client.hget('a', 'b'), None)
+
+    def test_hsetnx(self):
+        # Initially set the hash field
+        self.client.hsetnx('a', 'a1', 1)
+        self.assertEqual(self.client.hget('a', 'a1'), '1')
+        # Try and set the existing hash field to a different value
+        self.client.hsetnx('a', 'a1', 2)
+        self.assertEqual(self.client.hget('a', 'a1'), '1')
+
+    def test_hmset(self):
+        d = {'a': '1', 'b': '2', 'c': '3'}
+        self.assert_(self.client.hmset('foo', d))
+        self.assertEqual(self.client.hgetall('foo'), d)
+        self.assertRaises(redis.ResponseError, self.client.hmset, 'foo', {})
+
+    def test_hmget(self):
+        d = {'a': 1, 'b': 2, 'c': 3}
+        self.assert_(self.client.hmset('foo', d))
+        self.assertEqual(self.client.hmget('foo', ['a', 'b', 'c']), ['1', '2', '3'])
+        self.assertEqual(self.client.hmget('foo', ['a', 'c']), ['1', '3'])
+
+    def test_hmget_empty(self):
+        self.assertEqual(self.client.hmget('foo', ['a', 'b']), [None, None])
+
+    def test_hmget_no_keys(self):
+        self.assertRaises(redis.ResponseError, self.client.hmget, 'foo', [])
+
+    def test_hdel(self):
+        # key is not a hash
+        self.client['a'] = 'a'
+        self.assertRaises(redis.ResponseError, self.client.hdel, 'a', 'a1')
+        del self.client['a']
+        # no key
+        self.assertEquals(self.client.hdel('a', 'a1'), False)
+        # real logic
+        self.make_hash('a', {'a1': 1, 'a2': 2, 'a3': 3})
+        self.assertEquals(self.client.hget('a', 'a2'), '2')
+        self.assert_(self.client.hdel('a', 'a2'))
+        self.assertEquals(self.client.hget('a', 'a2'), None)
+
+    def test_hexists(self):
+        # key is not a hash
+        self.client['a'] = 'a'
+        self.assertRaises(redis.ResponseError, self.client.hexists, 'a', 'a1')
+        del self.client['a']
+        # no key
+        self.assertEquals(self.client.hexists('a', 'a1'), False)
+        # real logic
+        self.make_hash('a', {'a1': 1, 'a2': 2, 'a3': 3})
+        self.assertEquals(self.client.hexists('a', 'a1'), True)
+        self.assertEquals(self.client.hexists('a', 'a4'), False)
+        self.client.hdel('a', 'a1')
+        self.assertEquals(self.client.hexists('a', 'a1'), False)
+
+    def test_hgetall(self):
+        # key is not a hash
+        self.client['a'] = 'a'
+        self.assertRaises(redis.ResponseError, self.client.hgetall, 'a')
+        del self.client['a']
+        # no key
+        self.assertEquals(self.client.hgetall('a'), {})
+        # real logic
+        h = {'a1': '1', 'a2': '2', 'a3': '3'}
+        self.make_hash('a', h)
+        remote_hash = self.client.hgetall('a')
+        self.assertEquals(h, remote_hash)
+
+    def test_hincrby(self):
+        # key is not a hash
+        self.client['a'] = 'a'
+        self.assertRaises(redis.ResponseError, self.client.hincrby, 'a', 'a1')
+        del self.client['a']
+        # no key should create the hash and incr the key's value to 1
+        self.assertEquals(self.client.hincrby('a', 'a1'), 1)
+        # real logic
+        self.assertEquals(self.client.hincrby('a', 'a1'), 2)
+        self.assertEquals(self.client.hincrby('a', 'a1', amount=2), 4)
+        # negative values decrement
+        self.assertEquals(self.client.hincrby('a', 'a1', amount=-3), 1)
+        # hash that exists, but key that doesn't
+        self.assertEquals(self.client.hincrby('a', 'a2', amount=3), 3)
+        # finally a key that's not an int
+        self.client.hset('a', 'a3', 'foo')
+        self.assertRaises(redis.ResponseError, self.client.hincrby, 'a', 'a3')
+
+
+    def test_hkeys(self):
+        # key is not a hash
+        self.client['a'] = 'a'
+        self.assertRaises(redis.ResponseError, self.client.hkeys, 'a')
+        del self.client['a']
+        # no key
+        self.assertEquals(self.client.hkeys('a'), [])
+        # real logic
+        h = {'a1': '1', 'a2': '2', 'a3': '3'}
+        self.make_hash('a', h)
+        keys = h.keys()
+        keys.sort()
+        remote_keys = self.client.hkeys('a')
+        remote_keys.sort()
+        self.assertEquals(keys, remote_keys)
+
+    def test_hlen(self):
+        # key is not a hash
+        self.client['a'] = 'a'
+        self.assertRaises(redis.ResponseError, self.client.hlen, 'a')
+        del self.client['a']
+        # no key
+        self.assertEquals(self.client.hlen('a'), 0)
+        # real logic
+        self.make_hash('a', {'a1': 1, 'a2': 2, 'a3': 3})
+        self.assertEquals(self.client.hlen('a'), 3)
+        self.client.hdel('a', 'a3')
+        self.assertEquals(self.client.hlen('a'), 2)
+
+    def test_hvals(self):
+        # key is not a hash
+        self.client['a'] = 'a'
+        self.assertRaises(redis.ResponseError, self.client.hvals, 'a')
+        del self.client['a']
+        # no key
+        self.assertEquals(self.client.hvals('a'), [])
+        # real logic
+        h = {'a1': '1', 'a2': '2', 'a3': '3'}
+        self.make_hash('a', h)
+        vals = h.values()
+        vals.sort()
+        remote_vals = self.client.hvals('a')
+        remote_vals.sort()
+        self.assertEquals(vals, remote_vals)
+
+    # SORT
+    def test_sort_bad_key(self):
+        # key is not set
+        self.assertEquals(self.client.sort('a'), [])
+        # key is a string value
+        self.client['a'] = 'a'
+        self.assertRaises(redis.ResponseError, self.client.sort, 'a')
+        del self.client['a']
+
+    def test_sort_basic(self):
+        self.make_list('a', '3214')
+        self.assertEquals(self.client.sort('a'), ['1', '2', '3', '4'])
+
+    def test_sort_limited(self):
+        self.make_list('a', '3214')
+        self.assertEquals(self.client.sort('a', start=1, num=2), ['2', '3'])
+
+    def test_sort_by(self):
+        self.client['score:1'] = 8
+        self.client['score:2'] = 3
+        self.client['score:3'] = 5
+        self.make_list('a_values', '123')
+        self.assertEquals(self.client.sort('a_values', by='score:*'),
+            ['2', '3', '1'])
+
+    def test_sort_get(self):
+        self.client['user:1'] = 'u1'
+        self.client['user:2'] = 'u2'
+        self.client['user:3'] = 'u3'
+        self.make_list('a', '231')
+        self.assertEquals(self.client.sort('a', get='user:*'),
+            ['u1', 'u2', 'u3'])
+
+    def test_sort_get_multi(self):
+        self.client['user:1'] = 'u1'
+        self.client['user:2'] = 'u2'
+        self.client['user:3'] = 'u3'
+        self.make_list('a', '231')
+        self.assertEquals(self.client.sort('a', get=('user:*', '#')),
+            ['u1', '1', 'u2', '2', 'u3', '3'])
+
+    def test_sort_desc(self):
+        self.make_list('a', '231')
+        self.assertEquals(self.client.sort('a', desc=True), ['3', '2', '1'])
+
+    def test_sort_alpha(self):
+        self.make_list('a', 'ecbda')
+        self.assertEquals(self.client.sort('a', alpha=True),
+            ['a', 'b', 'c', 'd', 'e'])
+
+    def test_sort_store(self):
+        self.make_list('a', '231')
+        self.assertEquals(self.client.sort('a', store='sorted_values'), 3)
+        self.assertEquals(self.client.lrange('sorted_values', 0, 5),
+            ['1', '2', '3'])
+
+    def test_sort_all_options(self):
+        self.client['user:1:username'] = 'zeus'
+        self.client['user:2:username'] = 'titan'
+        self.client['user:3:username'] = 'hermes'
+        self.client['user:4:username'] = 'hercules'
+        self.client['user:5:username'] = 'apollo'
+        self.client['user:6:username'] = 'athena'
+        self.client['user:7:username'] = 'hades'
+        self.client['user:8:username'] = 'dionysus'
+
+        self.client['user:1:favorite_drink'] = 'yuengling'
+        self.client['user:2:favorite_drink'] = 'rum'
+        self.client['user:3:favorite_drink'] = 'vodka'
+        self.client['user:4:favorite_drink'] = 'milk'
+        self.client['user:5:favorite_drink'] = 'pinot noir'
+        self.client['user:6:favorite_drink'] = 'water'
+        self.client['user:7:favorite_drink'] = 'gin'
+        self.client['user:8:favorite_drink'] = 'apple juice'
+
+        self.make_list('gods', '12345678')
+        num = self.client.sort('gods', start=2, num=4, by='user:*:username',
+            get='user:*:favorite_drink', desc=True, alpha=True, store='sorted')
+        self.assertEquals(num, 4)
+        self.assertEquals(self.client.lrange('sorted', 0, 10),
+            ['vodka', 'milk', 'gin', 'apple juice'])
+
+    # PUBSUB
+    def test_pubsub(self):
+        # create a new client to not polute the existing one
+        r = self.get_client()
+        channels = ('a1', 'a2', 'a3')
+        for c in channels:
+            r.subscribe(c)
+        # state variable should be flipped
+        self.assertEquals(r.subscribed, True)
+
+        channels_to_publish_to = channels + ('a4',)
+        messages_per_channel = 4
+        def publish():
+            for i in range(messages_per_channel):
+                for c in channels_to_publish_to:
+                    self.client.publish(c, 'a message')
+                time.sleep(0.01)
+            for c in channels_to_publish_to:
+                self.client.publish(c, 'unsubscribe')
+            time.sleep(0.01)
+
+        messages = []
+        # should receive a message for each subscribe/unsubscribe command
+        # plus a message for each iteration of the loop * num channels
+        # we hide the data messages that tell the client to unsubscribe
+        num_messages_to_expect = len(channels)*2 + \
+            (messages_per_channel*len(channels))
+        t = threading.Thread(target=publish)
+        t.start()
+        for msg in r.listen():
+            if msg['data'] == 'unsubscribe':
+                r.unsubscribe(msg['channel'])
+            else:
+                messages.append(msg)
+
+        self.assertEquals(r.subscribed, False)
+        self.assertEquals(len(messages), num_messages_to_expect)
+        sent_types, sent_channels = {}, {}
+        for msg in messages:
+            msg_type = msg['type']
+            channel = msg['channel']
+            sent_types.setdefault(msg_type, 0)
+            sent_types[msg_type] += 1
+            if msg_type == 'message':
+                sent_channels.setdefault(channel, 0)
+                sent_channels[channel] += 1
+        for channel in channels:
+            self.assert_(channel in sent_channels)
+            self.assertEquals(sent_channels[channel], messages_per_channel)
+        self.assertEquals(sent_types['subscribe'], len(channels))
+        self.assertEquals(sent_types['unsubscribe'], len(channels))
+        self.assertEquals(sent_types['message'],
+            len(channels) * messages_per_channel)
+
+    ## BINARY SAFE
+    # TODO add more tests
+    def test_binary_get_set(self):
+        self.assertTrue(self.client.set(' foo bar ', '123'))
+        self.assertEqual(self.client.get(' foo bar '), '123')
+
+        self.assertTrue(self.client.set(' foo\r\nbar\r\n ', '456'))
+        self.assertEqual(self.client.get(' foo\r\nbar\r\n '), '456')
+
+        self.assertTrue(self.client.set(' \r\n\t\x07\x13 ', '789'))
+        self.assertEqual(self.client.get(' \r\n\t\x07\x13 '), '789')
+
+        self.assertEqual(sorted(self.client.keys('*')), [' \r\n\t\x07\x13 ', ' foo\r\nbar\r\n ', ' foo bar '])
+
+        self.assertTrue(self.client.delete(' foo bar '))
+        self.assertTrue(self.client.delete(' foo\r\nbar\r\n '))
+        self.assertTrue(self.client.delete(' \r\n\t\x07\x13 '))
+
+    def test_binary_lists(self):
+        mapping = {'foo bar': '123',
+                   'foo\r\nbar\r\n': '456',
+                   'foo\tbar\x07': '789',
+                   }
+        # fill in lists
+        for key, value in mapping.iteritems():
+            for c in value:
+                self.assertTrue(self.client.rpush(key, c))
+
+        # check that KEYS returns all the keys as they are
+        self.assertEqual(sorted(self.client.keys('*')), sorted(mapping.keys()))
+
+        # check that it is possible to get list content by key name
+        for key in mapping.keys():
+            self.assertEqual(self.client.lrange(key, 0, -1), list(mapping[key]))
diff --git a/templates/mix/group.html b/templates/mix/group.html
index 4648d94..ad35758 100644
--- a/templates/mix/group.html
+++ b/templates/mix/group.html
@@ -16,14 +16,26 @@