Top

telemeta.models.query module

# -*- coding: utf-8 -*-
# Copyright (C) 2007-2010 Samalyse SARL
# Copyright (C) 2010-2011 Parisson SARL
#
# This file is part of Telemeta.

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
# Authors: Olivier Guilyardi <olivier@samalyse.com>
#          David LIPSZYC <davidlipszyc@gmail.com>
#          Guillaume Pellerin <yomguy@parisson.com>

from django.conf import settings
from django.db.models import Q, Max, Min
from telemeta.models.core import *
from telemeta.util.unaccent import unaccent, unaccent_icmp
from telemeta.models.enum import EthnicGroup
import re

engine = settings.DATABASES['default']['ENGINE']


class MediaItemQuerySet(CoreQuerySet):
    "Base class for all media item query sets"

    def quick_search(self, pattern):
        "Perform a quick search on code, title and collector name"
        q = (Q(code__contains=pattern) |
             Q(old_code__contains=pattern) |
             word_search_q('title', pattern) |
             word_search_q('comment', pattern) |
             self.by_fuzzy_collector_q(pattern))
        return self.filter(q)

    def without_collection(self):
        "Find items which do not belong to any collection"
        # Raw SQL: the ORM cannot directly express "FK target row is missing"
        # on this legacy schema.
        return self.extra(
            where=["collection_id NOT IN (SELECT id FROM media_collections)"])

    def by_public_id(self, public_id):
        "Find items by public_id"
        return self.filter(public_id=public_id)

    def by_recording_date(self, from_date, to_date=None):
        """Find items by recording date.

        With a single date, match items whose recording interval contains it;
        with two dates, match items whose interval overlaps the given range.
        """
        if to_date is None:
            return self.filter(recorded_from_date__lte=from_date,
                               recorded_to_date__gte=from_date)
        else:
            return self.filter(Q(recorded_from_date__range=(from_date, to_date))
                               | Q(recorded_to_date__range=(from_date, to_date)))

    def by_title(self, pattern):
        "Find items by title"
        # to (sort of) sync with models.media.MediaItem.get_title():
        # fall back to the collection title when the item title is empty
        return self.filter(word_search_q("title", pattern) |
                           (Q(title="") & word_search_q("collection__title", pattern)))

    def by_publish_year(self, from_year, to_year=None):
        "Find items by publishing year"
        if to_year is None:
            to_year = from_year
        return self.filter(collection__year_published__range=(from_year, to_year))

    def by_change_time(self, from_time=None, until_time=None):
        "Find items by last change time"
        return self._by_change_time('item', from_time, until_time)

    def by_location(self, location):
        "Find items by location (including all locations apparented to it)"
        return self.filter(location__in=location.apparented())

    @staticmethod
    def __name_cmp(obj1, obj2):
        # Accent- and case-insensitive comparator on the .name attribute
        return unaccent_icmp(obj1.name, obj2.name)

    def locations(self):
        "Return item locations plus their ancestor and current locations"
        from telemeta.models import Location, LocationRelation
        l = self.values('location')
        c = self.values('location__current_location')
        r = LocationRelation.objects.filter(location__in=l).values('ancestor_location')
        return Location.objects.filter(Q(pk__in=l) | Q(pk__in=r) | Q(pk__in=c))

    def countries(self, group_by_continent=False):
        """List the countries of the items.

        Returns a sorted list of countries, or, when group_by_continent is
        True, a list of {'continent': ..., 'countries': [...]} dicts.
        """
        countries = []
        from telemeta.models import Location
        for id in self.filter(location__isnull=False).values_list('location', flat=True).distinct():
            location = Location.objects.get(pk=id)
            for l in location.countries():
                c = l.current_location
                if c not in countries:
                    countries.append(c)

        if group_by_continent:
            grouped = {}
            for country in countries:
                for continent in country.continents():
                    # setdefault replaces the deprecated has_key() double lookup
                    grouped.setdefault(continent, []).append(country)

            keys = grouped.keys()
            keys.sort(self.__name_cmp)  # Python 2 comparator-style sort
            ordered = []
            for c in keys:
                grouped[c].sort(self.__name_cmp)
                ordered.append({'continent': c, 'countries': grouped[c]})

            countries = ordered
        else:
            countries.sort(self.__name_cmp)

        return countries

    def virtual(self, *args):
        """Annotate the query set with SQL-computed virtual fields.

        Supported fields: 'apparent_collector', 'country_or_continent'.
        The IF()-based extra() SQL is MySQL-specific, so it is skipped on the
        sqlite3 and postgresql backends.
        """
        qs = self
        related = []
        from telemeta.models import Location
        for f in args:
            if f == 'apparent_collector':
                if not 'sqlite3' in engine and not 'postgresql_psycopg2' in engine:
                    related.append('collection')
                    qs = qs.extra(select={f:
                        'IF(collector_from_collection, '
                            'IF(media_collections.collector_is_creator, '
                               'media_collections.creator, '
                               'media_collections.collector),'
                            'media_items.collector)'})
            elif f == 'country_or_continent':
                # NOTE(review): 'location' is select_related even on backends
                # where the extra() select is skipped — presumably intentional.
                related.append('location')
                if not 'sqlite3' in engine and not 'postgresql_psycopg2' in engine:
                    qs = qs.extra(select={f:
                        'IF(locations.type = ' + str(Location.COUNTRY) + ' '
                        'OR locations.type = ' + str(Location.CONTINENT) + ','
                        'locations.name, '
                        '(SELECT l2.name FROM location_relations AS r INNER JOIN locations AS l2 '
                        'ON r.ancestor_location_id = l2.id '
                        'WHERE r.location_id = media_items.location_id AND l2.type = ' + str(Location.COUNTRY) + ' LIMIT 1))'
                    })
            else:
                raise Exception("Unsupported virtual field: %s" % f)

        if related:
            qs = qs.select_related(*related)

        return qs

    def ethnic_groups(self):
        "Return the distinct ethnic groups of the items"
        ids = self.filter(ethnic_group__isnull=False).values('ethnic_group')
        return EthnicGroup.objects.filter(pk__in=ids)

    @staticmethod
    def by_fuzzy_collector_q(pattern):
        "Build a Q matching the pattern against item and collection collectors/creators"
        return (word_search_q('collection__creator', pattern) |
                word_search_q('collection__collector', pattern) |
                word_search_q('collector', pattern))

    def by_fuzzy_collector(self, pattern):
        "Find items by collector name (fuzzy word search)"
        return self.filter(self.by_fuzzy_collector_q(pattern))

    def sound(self):
        "Find items which have an attached sound file or URL"
        return self.filter(Q(file__contains='/') | Q(url__contains='/'))

    def sound_public(self):
        "Find items with sound whose item and collection access are fully public"
        return self.filter(Q(file__contains='/') | Q(url__contains='/'),
                public_access='full', collection__public_access='full')

    def by_instrument(self, name):
        "Find items by instrument name or instrument alias"
        from telemeta.models.instrument import Instrument, InstrumentAlias
        from telemeta.models.item import MediaItemPerformance
        instruments = Instrument.objects.filter(name__icontains=name)
        aliases = InstrumentAlias.objects.filter(name__icontains=name)
        performances = MediaItemPerformance.objects.filter(
            Q(instrument__in=instruments) | Q(alias__in=aliases))
        # Filter on the related queryset directly instead of materializing
        # every performance into a Python list first.
        return self.filter(performances__in=performances).distinct()

class MediaItemManager(CoreManager):
    "Manage media items queries"

    def get_query_set(self):
        "Return media query sets"
        return MediaItemQuerySet(self.model)

    def enriched(self):
        "Query set with additional virtual fields such as apparent_collector and country_or_continent"
        qs = self.get_query_set()
        return qs.virtual('apparent_collector', 'country_or_continent')

    # The methods below simply delegate to the custom query set; each one
    # borrows the query set method's docstring so help() stays accurate.

    def quick_search(self, *args, **kwargs):
        qs = self.get_query_set()
        return qs.quick_search(*args, **kwargs)
    quick_search.__doc__ = MediaItemQuerySet.quick_search.__doc__

    def without_collection(self, *args, **kwargs):
        qs = self.get_query_set()
        return qs.without_collection(*args, **kwargs)
    without_collection.__doc__ = MediaItemQuerySet.without_collection.__doc__

    def by_recording_date(self, *args, **kwargs):
        qs = self.get_query_set()
        return qs.by_recording_date(*args, **kwargs)
    by_recording_date.__doc__ = MediaItemQuerySet.by_recording_date.__doc__

    def by_title(self, *args, **kwargs):
        qs = self.get_query_set()
        return qs.by_title(*args, **kwargs)
    by_title.__doc__ = MediaItemQuerySet.by_title.__doc__

    def by_publish_year(self, *args, **kwargs):
        qs = self.get_query_set()
        return qs.by_publish_year(*args, **kwargs)
    by_publish_year.__doc__ = MediaItemQuerySet.by_publish_year.__doc__

    def by_change_time(self, *args, **kwargs):
        qs = self.get_query_set()
        return qs.by_change_time(*args, **kwargs)
    by_change_time.__doc__ = MediaItemQuerySet.by_change_time.__doc__

    def by_location(self, *args, **kwargs):
        qs = self.get_query_set()
        return qs.by_location(*args, **kwargs)
    by_location.__doc__ = MediaItemQuerySet.by_location.__doc__

    def sound(self, *args, **kwargs):
        qs = self.get_query_set()
        return qs.sound(*args, **kwargs)
    sound.__doc__ = MediaItemQuerySet.sound.__doc__

    def sound_public(self, *args, **kwargs):
        qs = self.get_query_set()
        return qs.sound_public(*args, **kwargs)
    sound_public.__doc__ = MediaItemQuerySet.sound_public.__doc__

    def by_instrument(self, *args, **kwargs):
        qs = self.get_query_set()
        return qs.by_instrument(*args, **kwargs)
    by_instrument.__doc__ = MediaItemQuerySet.by_instrument.__doc__


class MediaCollectionQuerySet(CoreQuerySet):
    "Base class for media collection query sets"

    def quick_search(self, pattern):
        "Perform a quick search on code, title and collector name"
        from telemeta.models.collection import MediaCollection
        pattern = pattern.strip()
        mod = MediaCollection()
        fields = mod.to_dict()
        keys = fields.keys()
        q = self.by_fuzzy_collector_q(pattern)
        # OR in a word search on every text-like field of the model
        for field in keys:
            field_str = str(mod._meta.get_field(field))
            if 'CharField' in field_str or 'TextField' in field_str:
                q = q | word_search_q(field, pattern)
        return self.filter(q)

    def by_location(self, location):
        "Find collections by location (including all locations apparented to it)"
        return self.filter(items__location__in=location.apparented()).distinct()

    def by_recording_year(self, from_year, to_year=None):
        """Find collections by recording year.

        With a single year, match collections whose recording interval
        contains it; with two, match intervals overlapping the range.
        """
        if to_year is None:
            return self.filter(recorded_from_year__lte=from_year,
                               recorded_to_year__gte=from_year)
        else:
            return self.filter(Q(recorded_from_year__range=(from_year, to_year)) |
                               Q(recorded_to_year__range=(from_year, to_year)))

    def by_publish_year(self, from_year, to_year=None):
        "Find collections by publishing year"
        if to_year is None:
            to_year = from_year
        return self.filter(year_published__range=(from_year, to_year))

    def by_ethnic_group(self, group):
        "Find collections by ethnic group"
        return self.filter(items__ethnic_group=group).distinct()

    def by_change_time(self, from_time=None, until_time=None):
        "Find collections between two dates"
        return self._by_change_time('collection', from_time, until_time)

    def virtual(self, *args):
        """Annotate the query set with SQL-computed virtual fields.

        Only 'apparent_collector' is supported; its IF()-based extra() SQL is
        MySQL-specific and is skipped on the sqlite3/postgresql backends.
        """
        qs = self
        for f in args:
            if f == 'apparent_collector':
                if not 'sqlite3' in engine and not 'postgresql_psycopg2' in engine:
                    qs = qs.extra(select={f: 'IF(media_collections.collector_is_creator, '
                                         'media_collections.creator, media_collections.collector)'})
            else:
                raise Exception("Unsupported virtual field: %s" % f)

        return qs

    def recording_year_range(self):
        """Return the (min, max) recording years over the collections.

        Zero years are treated as "unknown" and excluded from the minimum.
        NOTE(review): relies on Python 2 ordering where None compares lower
        than any number inside max()/min(); the trailing fixups then map a
        missing bound onto the other one.
        """
        from_max = self.aggregate(Max('recorded_from_year'))['recorded_from_year__max']
        to_max   = self.aggregate(Max('recorded_to_year'))['recorded_to_year__max']
        year_max = max(from_max, to_max)

        from_min = self.filter(recorded_from_year__gt=0).aggregate(Min('recorded_from_year'))['recorded_from_year__min']
        to_min   = self.filter(recorded_to_year__gt=0).aggregate(Min('recorded_to_year'))['recorded_to_year__min']
        year_min = min(from_min, to_min)

        if not year_max:
            year_max = year_min
        elif not year_min:
            year_min = year_max

        return year_min, year_max

    def publishing_year_range(self):
        "Return the (min, max) publishing years over the collections (0 = unknown)"
        year_max = self.aggregate(Max('year_published'))['year_published__max']
        year_min = self.filter(year_published__gt=0).aggregate(Min('year_published'))['year_published__min']

        return year_min, year_max

    @staticmethod
    def by_fuzzy_collector_q(pattern):
        "Build a Q matching the pattern against collection creator and collector"
        return word_search_q('creator', pattern) | word_search_q('collector', pattern)

    def by_fuzzy_collector(self, pattern):
        "Find collections by collector name (fuzzy word search)"
        return self.filter(self.by_fuzzy_collector_q(pattern))

    def sound(self):
        "Find collections with at least one item carrying a sound file or URL"
        return self.filter(Q(items__file__contains='/') | Q(items__url__contains='/')).distinct()

    def by_instrument(self, name):
        "Find collections by instrument name or instrument alias"
        from telemeta.models.item import MediaItemPerformance
        from telemeta.models.instrument import Instrument, InstrumentAlias
        instruments = Instrument.objects.filter(name__icontains=name)
        aliases = InstrumentAlias.objects.filter(name__icontains=name)
        performances = MediaItemPerformance.objects.filter(
            Q(instrument__in=instruments) | Q(alias__in=aliases))
        # Single query over the related item pks instead of fetching each
        # performance's media_item object into a Python list (N+1 queries).
        items = performances.values_list('media_item', flat=True)
        return self.filter(items__in=items).distinct()


class MediaCollectionManager(CoreManager):
    "Manage collection queries"

    def get_query_set(self):
        "Return the collection query"
        return MediaCollectionQuerySet(self.model)

    def enriched(self):
        "Query set with additional virtual fields such as apparent_collector"
        return self.get_query_set().virtual('apparent_collector')

    # The methods below simply delegate to the custom query set; each one
    # borrows the query set method's docstring so help() stays accurate.
    # (An unused private __name_cmp helper, duplicated from the query set,
    # was removed as dead code.)

    def quick_search(self, *args, **kwargs):
        return self.get_query_set().quick_search(*args, **kwargs)
    quick_search.__doc__ = MediaCollectionQuerySet.quick_search.__doc__

    def by_location(self, *args, **kwargs):
        return self.get_query_set().by_location(*args, **kwargs)
    by_location.__doc__ = MediaCollectionQuerySet.by_location.__doc__

    def by_recording_year(self, *args, **kwargs):
        return self.get_query_set().by_recording_year(*args, **kwargs)
    by_recording_year.__doc__ = MediaCollectionQuerySet.by_recording_year.__doc__

    def by_publish_year(self, *args, **kwargs):
        return self.get_query_set().by_publish_year(*args, **kwargs)
    by_publish_year.__doc__ = MediaCollectionQuerySet.by_publish_year.__doc__

    def by_ethnic_group(self, *args, **kwargs):
        return self.get_query_set().by_ethnic_group(*args, **kwargs)
    by_ethnic_group.__doc__ = MediaCollectionQuerySet.by_ethnic_group.__doc__

    def by_change_time(self, *args, **kwargs):
        return self.get_query_set().by_change_time(*args, **kwargs)
    by_change_time.__doc__ = MediaCollectionQuerySet.by_change_time.__doc__

    def sound(self, *args, **kwargs):
        return self.get_query_set().sound(*args, **kwargs)
    sound.__doc__ = MediaCollectionQuerySet.sound.__doc__

    def by_instrument(self, *args, **kwargs):
        return self.get_query_set().by_instrument(*args, **kwargs)
    by_instrument.__doc__ = MediaCollectionQuerySet.by_instrument.__doc__


class LocationQuerySet(CoreQuerySet):
    "Query set for locations, with a cached flat-name lookup table"

    # Class-level cache: flat ASCII name -> location id, built once per process.
    __flatname_map = None

    def by_flatname(self, flatname):
        "Find the location registered under the given flat name (KeyError if unknown)"
        flatnames = self.flatname_map()
        return self.filter(pk=flatnames[flatname])

    def flatname_map(self):
        "Build (and cache) a mapping of flat ASCII names to country/continent ids"
        if self.__class__.__flatname_map:
            return self.__class__.__flatname_map

        # 'flatnames' (not 'map') so the builtin is not shadowed
        flatnames = {}
        locations = self.filter(Q(type=self.model.COUNTRY) | Q(type=self.model.CONTINENT))
        for l in locations:
            flatname = unaccent(l.name).lower()
            flatname = re.sub('[^a-z]', '_', flatname)
            # Disambiguate name clashes by prefixing underscores
            while flatname in flatnames:
                flatname = '_' + flatname
            flatnames[flatname] = l.id

        self.__class__.__flatname_map = flatnames
        return flatnames

    def current(self):
        "Restrict to locations that are the current location of some location"
        return self.filter(id__in=self.values_list('current_location_id', flat=True)).distinct()


class LocationManager(CoreManager):
    "Manage location queries"

    def get_query_set(self):
        "Return location query set"
        return LocationQuerySet(self.model)

    # Delegate to the custom query set, borrowing its docstrings.

    def by_flatname(self, *args, **kwargs):
        qs = self.get_query_set()
        return qs.by_flatname(*args, **kwargs)
    by_flatname.__doc__ = LocationQuerySet.by_flatname.__doc__

    def flatname_map(self, *args, **kwargs):
        qs = self.get_query_set()
        return qs.flatname_map(*args, **kwargs)
    flatname_map.__doc__ = LocationQuerySet.flatname_map.__doc__


class MediaCorpusQuerySet(CoreQuerySet):
    "Base class for all media resource query sets"

    def quick_search(self, pattern):
        "Perform a quick search on text and char fields"
        from telemeta.models.corpus import MediaCorpus
        pattern = pattern.strip()
        instance = MediaCorpus()
        query = Q(code__contains=pattern)
        # OR in a word search on every CharField/TextField of the model
        for name in instance.to_dict().keys():
            descriptor = str(instance._meta.get_field(name))
            if 'CharField' in descriptor or 'TextField' in descriptor:
                query = query | word_search_q(name, pattern)
        return self.filter(query)


class MediaCorpusManager(CoreManager):
    "Manage media resource queries"

    def get_query_set(self):
        "Return resource query sets"
        return MediaCorpusQuerySet(self.model)

    def quick_search(self, *args, **kwargs):
        qs = self.get_query_set()
        return qs.quick_search(*args, **kwargs)
    quick_search.__doc__ = MediaCorpusQuerySet.quick_search.__doc__


class MediaFondsQuerySet(CoreQuerySet):
    "Base class for all media resource query sets"

    def quick_search(self, pattern):
        "Perform a quick search on text and char fields"
        from telemeta.models.fonds import MediaFonds
        pattern = pattern.strip()
        instance = MediaFonds()
        query = Q(code__contains=pattern)
        # OR in a word search on every CharField/TextField of the model
        for name in instance.to_dict().keys():
            descriptor = str(instance._meta.get_field(name))
            if 'CharField' in descriptor or 'TextField' in descriptor:
                query = query | word_search_q(name, pattern)
        return self.filter(query)


class MediaFondsManager(CoreManager):
    "Manage media resource queries"

    def get_query_set(self):
        "Return resource query sets"
        return MediaFondsQuerySet(self.model)

    def quick_search(self, *args, **kwargs):
        qs = self.get_query_set()
        return qs.quick_search(*args, **kwargs)
    quick_search.__doc__ = MediaFondsQuerySet.quick_search.__doc__


class InstrumentQuerySet(CoreQuerySet):
    "Base class for all media instrument query sets"

    def quick_search(self, pattern):
        "Perform a quick search on text and char fields"
        from telemeta.models.instrument import Instrument
        pattern = pattern.strip()
        instance = Instrument()
        query = Q(code__contains=pattern)
        # OR in a word search on every CharField/TextField of the model
        for name in instance.to_dict().keys():
            descriptor = str(instance._meta.get_field(name))
            if 'CharField' in descriptor or 'TextField' in descriptor:
                query = query | word_search_q(name, pattern)
        return self.filter(query)


class InstrumentManager(CoreManager):
    "Manage instrument queries"

    def get_query_set(self):
        "Return instrument query sets"
        return InstrumentQuerySet(self.model)

    def quick_search(self, *args, **kwargs):
        qs = self.get_query_set()
        return qs.quick_search(*args, **kwargs)
    quick_search.__doc__ = InstrumentQuerySet.quick_search.__doc__

Module variables

var PUBLIC_ACCESS_CHOICES

var app_name

var code_linesep

var default_decoding

var default_encoding

var engine

var eol

var ext

var mime_type

var private_extra_types

var public_extra_types

var strict_code

Classes

class InstrumentManager

Manage instrument queries

class InstrumentManager(CoreManager):
    "Manage instrument queries"

    def get_query_set(self):
        "Return instrument query sets"
        return InstrumentQuerySet(self.model)

    def quick_search(self, *args, **kwargs):
        return self.get_query_set().quick_search(*args, **kwargs)
    quick_search.__doc__ = InstrumentQuerySet.quick_search.__doc__

Ancestors (in MRO)

  • InstrumentManager
  • telemeta.models.core.CoreManager
  • telemeta.models.core.EnhancedManager
  • django.db.models.manager.Manager
  • __builtin__.object

Class variables

var creation_counter

Instance variables

var db

Methods

def __init__(

self)

def __init__(self):
    super(Manager, self).__init__()
    self._set_creation_counter()
    self.model = None
    self._inherited = False
    self._db = None

def aggregate(

self, *args, **kwargs)

def aggregate(self, *args, **kwargs):
    return self.get_queryset().aggregate(*args, **kwargs)

def all(

self)

def all(self):
    return self.get_queryset()

def annotate(

self, *args, **kwargs)

def annotate(self, *args, **kwargs):
    return self.get_queryset().annotate(*args, **kwargs)

def bulk_create(

self, *args, **kwargs)

def bulk_create(self, *args, **kwargs):
    return self.get_queryset().bulk_create(*args, **kwargs)

def complex_filter(

self, *args, **kwargs)

def complex_filter(self, *args, **kwargs):
    return self.get_queryset().complex_filter(*args, **kwargs)

def contribute_to_class(

self, model, name)

def contribute_to_class(self, model, name):
    # TODO: Use weakref because of possible memory leak / circular reference.
    self.model = model
    # Only contribute the manager if the model is concrete
    if model._meta.abstract:
        setattr(model, name, AbstractManagerDescriptor(model))
    elif model._meta.swapped:
        setattr(model, name, SwappedManagerDescriptor(model))
    else:
    # if not model._meta.abstract and not model._meta.swapped:
        setattr(model, name, ManagerDescriptor(self))
    if not getattr(model, '_default_manager', None) or self.creation_counter < model._default_manager.creation_counter:
        model._default_manager = self
    if model._meta.abstract or (self._inherited and not self.model._meta.proxy):
        model._meta.abstract_managers.append((self.creation_counter, name,
                self))
    else:
        model._meta.concrete_managers.append((self.creation_counter, name,
            self))

def count(

self)

def count(self):
    return self.get_queryset().count()

def create(

self, **kwargs)

def create(self, **kwargs):
    return self.get_queryset().create(**kwargs)

def dates(

self, *args, **kwargs)

def dates(self, *args, **kwargs):
    return self.get_queryset().dates(*args, **kwargs)

def datetimes(

self, *args, **kwargs)

def datetimes(self, *args, **kwargs):
    return self.get_queryset().datetimes(*args, **kwargs)

def db_manager(

self, using)

def db_manager(self, using):
    obj = copy.copy(self)
    obj._db = using
    return obj

def defer(

self, *args, **kwargs)

def defer(self, *args, **kwargs):
    return self.get_queryset().defer(*args, **kwargs)

def distinct(

self, *args, **kwargs)

def distinct(self, *args, **kwargs):
    return self.get_queryset().distinct(*args, **kwargs)

def earliest(

self, *args, **kwargs)

def earliest(self, *args, **kwargs):
    return self.get_queryset().earliest(*args, **kwargs)

def exclude(

self, *args, **kwargs)

def exclude(self, *args, **kwargs):
    return self.get_queryset().exclude(*args, **kwargs)

def exists(

self, *args, **kwargs)

def exists(self, *args, **kwargs):
    return self.get_queryset().exists(*args, **kwargs)

def extra(

self, *args, **kwargs)

def extra(self, *args, **kwargs):
    return self.get_queryset().extra(*args, **kwargs)

def filter(

self, *args, **kwargs)

def filter(self, *args, **kwargs):
    return self.get_queryset().filter(*args, **kwargs)

def first(

self)

def first(self):
    return self.get_queryset().first()

def get(

self, **kwargs)

def get(self, **kwargs):
    if kwargs.has_key('public_id'):
        try:
            args = kwargs.copy()
            args['code'] = kwargs['public_id']
            args.pop('public_id')
            return super(CoreManager, self).get(**args)
        except ObjectDoesNotExist:
            args = kwargs.copy()
            args['id'] = kwargs['public_id']
            args.pop('public_id')
            return super(CoreManager, self).get(**args)
    return super(CoreManager, self).get(**kwargs)

def get_or_create(

self, **kwargs)

def get_or_create(self, **kwargs):
    return self.get_queryset().get_or_create(**kwargs)

def get_query_set(

*args, **kwargs)

def wrapped(*args, **kwargs):
    warnings.warn(
        "`%s.%s` is deprecated, use `%s` instead." %
        (self.class_name, self.old_method_name, self.new_method_name),
        self.deprecation_warning, 2)
    return f(*args, **kwargs)

def get_queryset(

self)

Return instrument query sets

def get_query_set(self):
    "Return instrument query sets"
    return InstrumentQuerySet(self.model)

def in_bulk(

self, *args, **kwargs)

def in_bulk(self, *args, **kwargs):
    return self.get_queryset().in_bulk(*args, **kwargs)

def iterator(

self, *args, **kwargs)

def iterator(self, *args, **kwargs):
    return self.get_queryset().iterator(*args, **kwargs)

def last(

self)

def last(self):
    return self.get_queryset().last()

def latest(

self, *args, **kwargs)

def latest(self, *args, **kwargs):
    return self.get_queryset().latest(*args, **kwargs)

def none(

self, *args, **kwargs)

def none(self, *args, **kwargs):
    ""
    return self.get_query_set().none(*args, **kwargs)

def only(

self, *args, **kwargs)

def only(self, *args, **kwargs):
    return self.get_queryset().only(*args, **kwargs)

def order_by(

self, *args, **kwargs)

def order_by(self, *args, **kwargs):
    return self.get_queryset().order_by(*args, **kwargs)

Perform a quick search on text and char fields

def raw(

self, raw_query, params=None, *args, **kwargs)

def raw(self, raw_query, params=None, *args, **kwargs):
    return RawQuerySet(raw_query=raw_query, model=self.model, params=params, using=self._db, *args, **kwargs)

def reverse(

self, *args, **kwargs)

def reverse(self, *args, **kwargs):
    return self.get_queryset().reverse(*args, **kwargs)

def select_for_update(

self, *args, **kwargs)

def select_for_update(self, *args, **kwargs):
    return self.get_queryset().select_for_update(*args, **kwargs)

def update(

self, *args, **kwargs)

def update(self, *args, **kwargs):
    return self.get_queryset().update(*args, **kwargs)

def using(

self, *args, **kwargs)

def using(self, *args, **kwargs):
    return self.get_queryset().using(*args, **kwargs)

def values(

self, *args, **kwargs)

def values(self, *args, **kwargs):
    return self.get_queryset().values(*args, **kwargs)

def values_list(

self, *args, **kwargs)

def values_list(self, *args, **kwargs):
    return self.get_queryset().values_list(*args, **kwargs)

class InstrumentQuerySet

Base class for all media instrument query sets

class InstrumentQuerySet(CoreQuerySet):
    "Base class for all media instrument query sets"

    def quick_search(self, pattern):
        "Perform a quick search on text and char fields"
        from telemeta.models.instrument import Instrument
        mod = Instrument()
        pattern = pattern.strip()
        q = Q(code__contains=pattern)
        fields = mod.to_dict()
        keys =  fields.keys()
        for field in keys:
            field_str = str(mod._meta.get_field(field))
            if 'CharField' in field_str or 'TextField' in field_str:
                q = q | word_search_q(field, pattern)
        return self.filter(q)

Ancestors (in MRO)

  • InstrumentQuerySet
  • telemeta.models.core.CoreQuerySet
  • telemeta.models.core.EnhancedQuerySet
  • django.db.models.query.QuerySet
  • __builtin__.object

Class variables

var value_annotation

Instance variables

var db

Return the database that will be used if this query is executed now

var ordered

Returns True if the QuerySet is ordered -- i.e. has an order_by() clause or a default ordering on the model.

Methods

def __init__(

self, model=None, query=None, using=None)

def __init__(self, model=None, query=None, using=None):
    self.model = model
    self._db = using
    self.query = query or sql.Query(self.model)
    self._result_cache = None
    self._sticky_filter = False
    self._for_write = False
    self._prefetch_related_lookups = []
    self._prefetch_done = False
    self._known_related_objects = {}        # {rel_field, {pk: rel_obj}}

def aggregate(

self, *args, **kwargs)

Returns a dictionary containing the calculations (aggregation) over the current queryset

If args is present the expression is passed as a kwarg using the Aggregate object's default alias.

def aggregate(self, *args, **kwargs):
    """
    Returns a dictionary containing the calculations (aggregation)
    over the current queryset
    If args is present the expression is passed as a kwarg using
    the Aggregate object's default alias.
    """
    if self.query.distinct_fields:
        raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
    for arg in args:
        kwargs[arg.default_alias] = arg
    query = self.query.clone()
    for (alias, aggregate_expr) in kwargs.items():
        query.add_aggregate(aggregate_expr, self.model, alias,
            is_summary=True)
    return query.get_aggregation(using=self.db)

def all(

self)

Returns a new QuerySet that is a copy of the current one. This allows a QuerySet to proxy for a model manager in some cases.

def all(self):
    """
    Returns a new QuerySet that is a copy of the current one. This allows a
    QuerySet to proxy for a model manager in some cases.
    """
    return self._clone()

def annotate(

self, *args, **kwargs)

Return a query set in which the returned objects have been annotated with data aggregated from related fields.

def annotate(self, *args, **kwargs):
    """
    Return a query set in which the returned objects have been annotated
    with data aggregated from related fields.
    """
    for arg in args:
        if arg.default_alias in kwargs:
            raise ValueError("The named annotation '%s' conflicts with the "
                             "default name for another annotation."
                             % arg.default_alias)
        kwargs[arg.default_alias] = arg
    names = getattr(self, '_fields', None)
    if names is None:
        names = set(self.model._meta.get_all_field_names())
    for aggregate in kwargs:
        if aggregate in names:
            raise ValueError("The annotation '%s' conflicts with a field on "
                "the model." % aggregate)
    obj = self._clone()
    obj._setup_aggregate_query(list(kwargs))
    # Add the aggregates to the query
    for (alias, aggregate_expr) in kwargs.items():
        obj.query.add_aggregate(aggregate_expr, self.model, alias,
            is_summary=False)
    return obj

def bulk_create(

self, objs, batch_size=None)

Inserts each of the instances into the database. This does not call save() on each of the instances, does not send any pre/post save signals, and does not set the primary key attribute if it is an autoincrement field.

def bulk_create(self, objs, batch_size=None):
    """
    Inserts each of the instances into the database. This does *not* call
    save() on each of the instances, does not send any pre/post save
    signals, and does not set the primary key attribute if it is an
    autoincrement field.

    ``batch_size``, when given, caps how many rows go into each INSERT.
    Returns ``objs`` unchanged.
    """
    # So this case is fun. When you bulk insert you don't get the primary
    # keys back (if it's an autoincrement), so you can't insert into the
    # child tables which reference this. There are two workarounds: 1)
    # this could be implemented if you didn't have an autoincrement pk,
    # and 2) you could do it by doing O(n) normal inserts into the parent
    # tables to get the primary keys back, and then doing a single bulk
    # insert into the childmost table. Some databases might allow doing
    # this by using a RETURNING clause for the insert query. We're punting
    # on these for now because they are relatively rare cases.
    assert batch_size is None or batch_size > 0
    if self.model._meta.parents:
        raise ValueError("Can't bulk create an inherited model")
    if not objs:
        return objs
    self._for_write = True
    connection = connections[self.db]
    fields = self.model._meta.local_concrete_fields
    with transaction.commit_on_success_unless_managed(using=self.db):
        if (connection.features.can_combine_inserts_with_and_without_auto_increment_pk
            and self.model._meta.has_auto_field):
            # Backend can mix rows with and without explicit pk values.
            self._batched_insert(objs, fields, batch_size)
        else:
            # Otherwise split the objects and insert the pk-less ones
            # without the AutoField column.
            objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
            if objs_with_pk:
                self._batched_insert(objs_with_pk, fields, batch_size)
            if objs_without_pk:
                fields = [f for f in fields if not isinstance(f, AutoField)]
                self._batched_insert(objs_without_pk, fields, batch_size)
    return objs

def complex_filter(

self, filter_obj)

Returns a new QuerySet instance with filter_obj added to the filters.

filter_obj can be a Q object (or anything with an add_to_query() method) or a dictionary of keyword lookup arguments.

This exists to support framework features such as 'limit_choices_to', and usually it will be more natural to use other methods.

def complex_filter(self, filter_obj):
    """
    Return a new QuerySet with ``filter_obj`` merged into its filters.

    ``filter_obj`` may be a Q object (or anything exposing an
    add_to_query() method) or a dict of keyword lookups.  Exists mainly
    to support features such as 'limit_choices_to'; the other filtering
    methods are usually more natural.
    """
    supports_add = isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query')
    if not supports_add:
        # Plain dict of lookups: route through the normal filter machinery.
        return self._filter_or_exclude(None, **filter_obj)
    clone = self._clone()
    clone.query.add_q(filter_obj)
    return clone

def count(

self)

Performs a SELECT COUNT() and returns the number of records as an integer.

If the QuerySet is already fully cached this simply returns the length of the cached results set to avoid multiple SELECT COUNT(*) calls.

def count(self):
    """
    Perform a SELECT COUNT() and return the number of records as an
    integer.

    When the result cache is already populated, just return its length
    so no extra SELECT COUNT(*) is issued.
    """
    cached = self._result_cache
    if cached is None:
        return self.query.get_count(using=self.db)
    return len(cached)

def create(

self, **kwargs)

Creates a new object with the given kwargs, saving it to the database and returning the created object.

def create(self, **kwargs):
    """
    Build a new model instance from ``kwargs``, save it to the database
    with a forced INSERT, and return it.
    """
    instance = self.model(**kwargs)
    self._for_write = True
    instance.save(force_insert=True, using=self.db)
    return instance

def dates(

self, field_name, kind, order='ASC')

Returns a list of date objects representing all available dates for the given field_name, scoped to 'kind'.

def dates(self, field_name, kind, order='ASC'):
    """
    Returns a list of date objects representing all available dates for
    the given field_name, scoped to 'kind' ('year', 'month' or 'day'),
    in ascending or descending 'order'.
    """
    assert kind in ("year", "month", "day"), \
            "'kind' must be one of 'year', 'month' or 'day'."
    assert order in ('ASC', 'DESC'), \
            "'order' must be either 'ASC' or 'DESC'."
    # Clone into the specialised DateQuerySet which performs the
    # date-truncating query.
    return self._clone(klass=DateQuerySet, setup=True,
            _field_name=field_name, _kind=kind, _order=order)

def datetimes(

self, field_name, kind, order='ASC', tzinfo=None)

Returns a list of datetime objects representing all available datetimes for the given field_name, scoped to 'kind'.

def datetimes(self, field_name, kind, order='ASC', tzinfo=None):
    """
    Returns a list of datetime objects representing all available
    datetimes for the given field_name, scoped to 'kind'.
    """
    assert kind in ("year", "month", "day", "hour", "minute", "second"), \
            "'kind' must be one of 'year', 'month', 'day', 'hour', 'minute' or 'second'."
    assert order in ('ASC', 'DESC'), \
            "'order' must be either 'ASC' or 'DESC'."
    # tzinfo only matters when USE_TZ is on: default to the active
    # timezone; with USE_TZ off, force naive datetimes.
    if settings.USE_TZ:
        if tzinfo is None:
            tzinfo = timezone.get_current_timezone()
    else:
        tzinfo = None
    return self._clone(klass=DateTimeQuerySet, setup=True,
            _field_name=field_name, _kind=kind, _order=order, _tzinfo=tzinfo)

def defer(

self, *fields)

Defers the loading of data for certain fields until they are accessed. The set of fields to defer is added to any existing set of deferred fields. The only exception to this is if None is passed in as the only parameter, in which case all deferrals are removed (None acts as a reset option).

def defer(self, *fields):
    """
    Defer loading of the named fields until they are first accessed.

    Newly named fields are added to any existing deferred set.  As a
    special case, passing None as the sole argument clears every
    deferral (acting as a reset).
    """
    clone = self._clone()
    reset_requested = fields == (None,)
    if reset_requested:
        clone.query.clear_deferred_loading()
    else:
        clone.query.add_deferred_loading(fields)
    return clone

def delete(

self)

def delete(self):
    """
    Delete every object in this query set, first detaching or deleting
    rows that reference them, processed in fixed-size chunks of primary
    keys.

    For each related model: rows pointing at the doomed objects through
    a WeakForeignKey get that reference set to NULL; rows behind any
    other relation are deleted outright.  Finally the objects themselves
    are removed via the parent implementation.
    """
    CHUNK = 1024
    related_objects = self.model._meta.get_all_related_objects()
    total = self.count()
    pk_rows = self.values_list('pk')
    for related in related_objects:
        offset = 0
        while offset < total:
            ids = [v[0] for v in pk_rows[offset:offset + CHUNK]]
            # NOTE: renamed from `filter` to avoid shadowing the builtin.
            lookup = {related.field.name + '__pk__in': ids}
            q = related.model.objects.filter(**lookup)
            if isinstance(related.field, WeakForeignKey):
                # Weak relation: just drop the reference instead of
                # cascading the delete.
                update = {related.field.name: None}
                q.update(**update)
            else:
                q.delete()
            offset += CHUNK
    super(EnhancedQuerySet, self).delete()

def distinct(

self, *field_names)

Returns a new QuerySet instance that will select only distinct results.

def distinct(self, *field_names):
    """
    Return a new QuerySet restricted to distinct results, optionally
    distinct on the given field names only.
    """
    assert self.query.can_filter(), \
            "Cannot create distinct fields once a slice has been taken."
    clone = self._clone()
    clone.query.add_distinct_fields(*field_names)
    return clone

def earliest(

self, field_name=None)

def earliest(self, field_name=None):
    # Ascending direction ("" prefix): the first row is the earliest.
    return self._earliest_or_latest(field_name=field_name, direction="")

def exclude(

self, *args, **kwargs)

Returns a new QuerySet instance with NOT (args) ANDed to the existing set.

def exclude(self, *args, **kwargs):
    """
    Returns a new QuerySet instance with NOT (args) ANDed to the existing
    set.
    """
    # True => negate the lookups (filter() passes False).
    return self._filter_or_exclude(True, *args, **kwargs)

def exists(

self)

def exists(self):
    """
    Return True when the queryset matches at least one row, consulting
    the result cache first to avoid a query.
    """
    if self._result_cache is not None:
        return bool(self._result_cache)
    return self.query.has_results(using=self.db)

def extra(

self, select=None, where=None, params=None, tables=None, order_by=None, select_params=None)

Adds extra SQL fragments to the query.

def extra(self, select=None, where=None, params=None, tables=None,
          order_by=None, select_params=None):
    """
    Adds extra SQL fragments to the query.
    """
    assert self.query.can_filter(), \
            "Cannot change a query once a slice has been taken"
    clone = self._clone()
    # Note: add_extra expects (select, select_params, where, params,
    # tables, order_by) -- a different order than this signature.
    clone.query.add_extra(select, select_params, where, params, tables, order_by)
    return clone

def filter(

self, *args, **kwargs)

Returns a new QuerySet instance with the args ANDed to the existing set.

def filter(self, *args, **kwargs):
    """
    Returns a new QuerySet instance with the args ANDed to the existing
    set.
    """
    # False => keep matching rows (exclude() passes True to negate).
    return self._filter_or_exclude(False, *args, **kwargs)

def first(

self)

Returns the first object of a query, returns None if no match is found.

def first(self):
    """
    Return the first object of the query, or None when nothing matches.
    """
    # Guarantee a deterministic result by ordering on pk when no
    # ordering is in place.
    ordered_qs = self if self.ordered else self.order_by('pk')
    try:
        return ordered_qs[0]
    except IndexError:
        return None

def get(

self, *args, **kwargs)

Performs the query and returns a single object matching the given keyword arguments.

def get(self, *args, **kwargs):
    """
    Performs the query and returns a single object matching the given
    keyword arguments.

    Raises the model's DoesNotExist when nothing matches and its
    MultipleObjectsReturned when more than one row matches.
    """
    clone = self.filter(*args, **kwargs)
    if self.query.can_filter():
        # Ordering is irrelevant for a single-row fetch; drop it.
        clone = clone.order_by()
    num = len(clone)
    if num == 1:
        return clone._result_cache[0]
    if not num:
        raise self.model.DoesNotExist(
            "%s matching query does not exist." %
            self.model._meta.object_name)
    raise self.model.MultipleObjectsReturned(
        "get() returned more than one %s -- it returned %s!" %
        (self.model._meta.object_name, num))

def get_or_create(

self, **kwargs)

Looks up an object with the given kwargs, creating one if necessary. Returns a tuple of (object, created), where created is a boolean specifying whether an object was created.

def get_or_create(self, **kwargs):
    """
    Looks up an object with the given kwargs, creating one if necessary.
    Returns a tuple of (object, created), where created is a boolean
    specifying whether an object was created.

    The optional 'defaults' dict supplies extra field values used only
    when the object has to be created.
    """
    defaults = kwargs.pop('defaults', {})
    lookup = kwargs.copy()
    # Normalise attnames (e.g. fk_id) to field names for the lookup.
    for f in self.model._meta.fields:
        if f.attname in lookup:
            lookup[f.name] = lookup.pop(f.attname)
    try:
        self._for_write = True
        return self.get(**lookup), False
    except self.model.DoesNotExist:
        try:
            # Creation params: plain kwargs only (no __ lookups), then
            # overlaid with defaults.
            params = dict((k, v) for k, v in kwargs.items() if LOOKUP_SEP not in k)
            params.update(defaults)
            obj = self.model(**params)
            with transaction.atomic(using=self.db):
                obj.save(force_insert=True, using=self.db)
            return obj, True
        except DatabaseError:
            # A concurrent insert may have beaten us to it; retry the
            # lookup before giving up.
            exc_info = sys.exc_info()
            try:
                return self.get(**lookup), False
            except self.model.DoesNotExist:
                # Re-raise the DatabaseError with its original traceback.
                six.reraise(*exc_info)

def in_bulk(

self, id_list)

Returns a dictionary mapping each of the given IDs to the object with that ID.

def in_bulk(self, id_list):
    """
    Return a dictionary mapping each ID in ``id_list`` to the object
    carrying that primary key.
    """
    assert self.query.can_filter(), \
            "Cannot use 'limit' or 'offset' with in_bulk"
    if not id_list:
        return {}
    matches = self.filter(pk__in=id_list).order_by()
    return dict((obj._get_pk_val(), obj) for obj in matches)

def iterator(

self)

An iterator over the results from applying this QuerySet to the database.

def iterator(self):
    """
    An iterator over the results from applying this QuerySet to the
    database.

    Handles three concerns per row: building the model instance
    (possibly a deferred-field class), attaching extra-select and
    aggregate columns, and wiring in known related objects.
    """
    # fill_cache is truthy when select_related() is active and the
    # backend supports it; it may be a dict of requested relations.
    fill_cache = False
    if connections[self.db].features.supports_select_related:
        fill_cache = self.query.select_related
    if isinstance(fill_cache, dict):
        requested = fill_cache
    else:
        requested = None
    max_depth = self.query.max_depth
    extra_select = list(self.query.extra_select)
    aggregate_select = list(self.query.aggregate_select)
    only_load = self.query.get_loaded_field_names()
    if not fill_cache:
        fields = self.model._meta.concrete_fields
    load_fields = []
    # If only/defer clauses have been specified,
    # build the list of fields that are to be loaded.
    if only_load:
        for field, model in self.model._meta.get_concrete_fields_with_model():
            if model is None:
                model = self.model
            try:
                if field.name in only_load[model]:
                    # Add a field that has been explicitly included
                    load_fields.append(field.name)
            except KeyError:
                # Model wasn't explicitly listed in the only_load table
                # Therefore, we need to load all fields from this model
                load_fields.append(field.name)
    # Row layout: [extra_select columns][model fields][aggregates].
    index_start = len(extra_select)
    aggregate_start = index_start + len(load_fields or self.model._meta.concrete_fields)
    skip = None
    if load_fields and not fill_cache:
        # Some fields have been deferred, so we have to initialise
        # via keyword arguments.
        skip = set()
        init_list = []
        for field in fields:
            if field.name not in load_fields:
                skip.add(field.attname)
            else:
                init_list.append(field.attname)
        model_cls = deferred_class_factory(self.model, skip)
    # Cache db and model outside the loop
    db = self.db
    model = self.model
    compiler = self.query.get_compiler(using=db)
    if fill_cache:
        klass_info = get_klass_info(model, max_depth=max_depth,
                                    requested=requested, only_load=only_load)
    for row in compiler.results_iter():
        if fill_cache:
            obj, _ = get_cached_row(row, index_start, db, klass_info,
                                    offset=len(aggregate_select))
        else:
            # Omit aggregates in object creation.
            row_data = row[index_start:aggregate_start]
            if skip:
                obj = model_cls(**dict(zip(init_list, row_data)))
            else:
                obj = model(*row_data)
            # Store the source database of the object
            obj._state.db = db
            # This object came from the database; it's not being added.
            obj._state.adding = False
        if extra_select:
            for i, k in enumerate(extra_select):
                setattr(obj, k, row[i])
        # Add the aggregates to the model
        if aggregate_select:
            for i, aggregate in enumerate(aggregate_select):
                setattr(obj, aggregate, row[i + aggregate_start])
        # Add the known related objects to the model, if there are any
        if self._known_related_objects:
            for field, rel_objs in self._known_related_objects.items():
                # Avoid overwriting objects loaded e.g. by select_related
                if hasattr(obj, field.get_cache_name()):
                    continue
                pk = getattr(obj, field.get_attname())
                try:
                    rel_obj = rel_objs[pk]
                except KeyError:
                    pass               # may happen in qs1 | qs2 scenarios
                else:
                    setattr(obj, field.name, rel_obj)
        yield obj

def last(

self)

Returns the last object of a query, returns None if no match is found.

def last(self):
    """
    Return the last object of the query, or None when nothing matches.
    """
    # Flip an existing ordering, or fall back to descending pk.
    ordered_qs = self.reverse() if self.ordered else self.order_by('-pk')
    try:
        return ordered_qs[0]
    except IndexError:
        return None

def latest(

self, field_name=None)

def latest(self, field_name=None):
    # Descending direction ("-" prefix): the first row is the latest.
    return self._earliest_or_latest(field_name=field_name, direction="-")

def none(

self)

Return an empty result set

def none(self):  # redundant with none() in recent Django svn
    """Return an empty result set."""
    # A WHERE clause that can never be true yields zero rows.
    impossible = ["0 = 1"]
    return self.extra(where=impossible)

def only(

self, *fields)

Essentially, the opposite of defer. Only the fields passed into this method and that are not already specified as deferred are loaded immediately when the queryset is evaluated.

def only(self, *fields):
    """
    The opposite of defer(): only the fields named here (and any not
    already marked deferred) are loaded immediately when the queryset
    is evaluated.
    """
    if fields == (None,):
        # Unlike defer(), None is not a valid reset argument; fail loudly
        # for anyone who tries it anyway.
        raise TypeError("Cannot pass None as an argument to only().")
    clone = self._clone()
    clone.query.add_immediate_loading(fields)
    return clone

def order_by(

self, *field_names)

Returns a new QuerySet instance with the ordering changed.

def order_by(self, *field_names):
    """
    Return a new QuerySet whose ordering is replaced by ``field_names``.
    """
    assert self.query.can_filter(), \
            "Cannot reorder a query once a slice has been taken."
    clone = self._clone()
    # Wipe the previous ordering before installing the new one.
    clone.query.clear_ordering(force_empty=False)
    clone.query.add_ordering(*field_names)
    return clone

Returns a new QuerySet instance that will prefetch the specified Many-To-One and Many-To-Many related objects when the QuerySet is evaluated.

When prefetch_related() is called more than once, the list of lookups to prefetch is appended to. If prefetch_related(None) is called, the list is cleared.

Perform a quick search on text and char fields

def reverse(

self)

Reverses the ordering of the QuerySet.

def reverse(self):
    """
    Return a clone of this QuerySet with its ordering direction flipped.
    """
    flipped = self._clone()
    flipped.query.standard_ordering = not flipped.query.standard_ordering
    return flipped

def select_for_update(

self, **kwargs)

Returns a new QuerySet instance that will select objects with a FOR UPDATE lock.

def select_for_update(self, **kwargs):
    """
    Return a new QuerySet whose rows will be selected with a
    FOR UPDATE lock.
    """
    nowait = kwargs.pop('nowait', False)  # default: block until the lock is granted
    locked = self._clone()
    locked._for_write = True
    locked.query.select_for_update = True
    locked.query.select_for_update_nowait = nowait
    return locked

Returns a new QuerySet instance that will select related objects.

If fields are specified, they must be ForeignKey fields and only those related objects are included in the selection.

If select_related(None) is called, the list is cleared.

def update(

self, **kwargs)

Updates all elements in the current QuerySet, setting all the given fields to the appropriate values.

def update(self, **kwargs):
    """
    Updates all elements in the current QuerySet, setting all the given
    fields to the appropriate values.
    """
    assert self.query.can_filter(), \
            "Cannot update a query once a slice has been taken."
    self._for_write = True
    # Re-clone the query as an UpdateQuery so SET values can be attached.
    query = self.query.clone(sql.UpdateQuery)
    query.add_update_values(kwargs)
    with transaction.commit_on_success_unless_managed(using=self.db):
        rows = query.get_compiler(self.db).execute_sql(None)
    # The data changed underneath us; drop any cached results.
    self._result_cache = None
    return rows

def using(

self, alias)

Selects which database this QuerySet should execute its query against.

def using(self, alias):
    """
    Selects which database this QuerySet should execute its query
    against.
    """
    clone = self._clone()
    clone._db = alias
    return clone

def values(

self, *fields)

def values(self, *fields):
    # Clone into a ValuesQuerySet restricted to the named fields.
    return self._clone(klass=ValuesQuerySet, setup=True, _fields=fields)

def values_list(

self, *fields, **kwargs)

def values_list(self, *fields, **kwargs):
    """
    Return a ValuesListQuerySet over the named fields.

    ``flat=True`` (valid only with a single field) yields plain values
    instead of one-tuples.
    """
    flatten = kwargs.pop('flat', False)
    if kwargs:
        raise TypeError('Unexpected keyword arguments to values_list: %s'
                % (list(kwargs),))
    if flatten and len(fields) > 1:
        raise TypeError("'flat' is not valid when values_list is called with more than one field.")
    return self._clone(klass=ValuesListQuerySet, setup=True, flat=flatten,
            _fields=fields)

class LocationManager

class LocationManager(CoreManager):
    """Manager for Location models exposing flat-name helpers."""

    def get_query_set(self):
        "Return location query set"
        return LocationQuerySet(self.model)

    def by_flatname(self, *args, **kwargs):
        # Delegate to the query set; the docstring is copied from it below.
        return self.get_query_set().by_flatname(*args, **kwargs)
    by_flatname.__doc__ = LocationQuerySet.by_flatname.__doc__

    def flatname_map(self, *args, **kwargs):
        # Delegate to the query set; the docstring is copied from it below.
        return self.get_query_set().flatname_map(*args, **kwargs)
    flatname_map.__doc__ = LocationQuerySet.flatname_map.__doc__

Ancestors (in MRO)

  • LocationManager
  • telemeta.models.core.CoreManager
  • telemeta.models.core.EnhancedManager
  • django.db.models.manager.Manager
  • __builtin__.object

Class variables

var creation_counter

Instance variables

var db

Methods

def __init__(

self)

def __init__(self):
    """Initialise manager bookkeeping; the model is bound later by contribute_to_class()."""
    super(Manager, self).__init__()
    self._set_creation_counter()
    self.model = None
    self._inherited = False
    self._db = None

def aggregate(

self, *args, **kwargs)

def aggregate(self, *args, **kwargs):
    # Manager proxy: forward to the queryset implementation.
    return self.get_queryset().aggregate(*args, **kwargs)

def all(

self)

def all(self):
    # Manager proxy: a fresh, unfiltered queryset.
    return self.get_queryset()

def annotate(

self, *args, **kwargs)

def annotate(self, *args, **kwargs):
    # Manager proxy: forward to the queryset implementation.
    return self.get_queryset().annotate(*args, **kwargs)

def bulk_create(

self, *args, **kwargs)

def bulk_create(self, *args, **kwargs):
    # Manager proxy: forward to the queryset implementation.
    return self.get_queryset().bulk_create(*args, **kwargs)

def by_flatname(

self, *args, **kwargs)

def by_flatname(self, *args, **kwargs):
    # Manager proxy (note: uses the old get_query_set accessor).
    return self.get_query_set().by_flatname(*args, **kwargs)

def complex_filter(

self, *args, **kwargs)

def complex_filter(self, *args, **kwargs):
    # Manager proxy: forward to the queryset implementation.
    return self.get_queryset().complex_filter(*args, **kwargs)

def contribute_to_class(

self, model, name)

def contribute_to_class(self, model, name):
    """
    Attach this manager to ``model`` under attribute ``name``, choosing
    a descriptor appropriate to the model kind and registering the
    manager in the model's _meta bookkeeping.
    """
    # TODO: Use weakref because of possible memory leak / circular reference.
    self.model = model
    # Abstract and swapped-out models get guard descriptors that refuse
    # access; only concrete models get a working ManagerDescriptor.
    if model._meta.abstract:
        setattr(model, name, AbstractManagerDescriptor(model))
    elif model._meta.swapped:
        setattr(model, name, SwappedManagerDescriptor(model))
    else:
        # i.e. not abstract and not swapped.
        setattr(model, name, ManagerDescriptor(self))
    # The earliest-created manager wins as the default manager.
    if not getattr(model, '_default_manager', None) or self.creation_counter < model._default_manager.creation_counter:
        model._default_manager = self
    # File the manager as abstract or concrete for inheritance purposes.
    if model._meta.abstract or (self._inherited and not self.model._meta.proxy):
        model._meta.abstract_managers.append((self.creation_counter, name,
                self))
    else:
        model._meta.concrete_managers.append((self.creation_counter, name,
            self))

def count(

self)

def count(self):
    # Manager proxy: forward to the queryset implementation.
    return self.get_queryset().count()

def create(

self, **kwargs)

def create(self, **kwargs):
    # Manager proxy: forward to the queryset implementation.
    return self.get_queryset().create(**kwargs)

def dates(

self, *args, **kwargs)

def dates(self, *args, **kwargs):
    # Manager proxy: forward to the queryset implementation.
    return self.get_queryset().dates(*args, **kwargs)

def datetimes(

self, *args, **kwargs)

def datetimes(self, *args, **kwargs):
    # Manager proxy: forward to the queryset implementation.
    return self.get_queryset().datetimes(*args, **kwargs)

def db_manager(

self, using)

def db_manager(self, using):
    """Return a shallow copy of this manager pinned to the given database alias."""
    pinned = copy.copy(self)
    pinned._db = using
    return pinned

def defer(

self, *args, **kwargs)

def defer(self, *args, **kwargs):
    # Manager proxy: forward to the queryset implementation.
    return self.get_queryset().defer(*args, **kwargs)

def distinct(

self, *args, **kwargs)

def distinct(self, *args, **kwargs):
    # Manager proxy: forward to the queryset implementation.
    return self.get_queryset().distinct(*args, **kwargs)

def earliest(

self, *args, **kwargs)

def earliest(self, *args, **kwargs):
    # Manager proxy: forward to the queryset implementation.
    return self.get_queryset().earliest(*args, **kwargs)

def exclude(

self, *args, **kwargs)

def exclude(self, *args, **kwargs):
    # Manager proxy: forward to the queryset implementation.
    return self.get_queryset().exclude(*args, **kwargs)

def exists(

self, *args, **kwargs)

def exists(self, *args, **kwargs):
    # Manager proxy: forward to the queryset implementation.
    return self.get_queryset().exists(*args, **kwargs)

def extra(

self, *args, **kwargs)

def extra(self, *args, **kwargs):
    # Manager proxy: forward to the queryset implementation.
    return self.get_queryset().extra(*args, **kwargs)

def filter(

self, *args, **kwargs)

def filter(self, *args, **kwargs):
    # Manager proxy: forward to the queryset implementation.
    return self.get_queryset().filter(*args, **kwargs)

def first(

self)

def first(self):
    # Manager proxy: forward to the queryset implementation.
    return self.get_queryset().first()

def flatname_map(

self, *args, **kwargs)

def flatname_map(self, *args, **kwargs):
    # Manager proxy (note: uses the old get_query_set accessor).
    return self.get_query_set().flatname_map(*args, **kwargs)

def get(

self, **kwargs)

def get(self, **kwargs):
    """
    Fetch a single object; a ``public_id`` keyword is resolved first as
    the object's ``code`` and then, if that does not exist, as its ``id``.
    All other keyword lookups pass through unchanged.
    """
    if 'public_id' in kwargs:  # dict.has_key() no longer exists in Python 3
        try:
            args = kwargs.copy()
            args['code'] = kwargs['public_id']
            args.pop('public_id')
            return super(CoreManager, self).get(**args)
        except ObjectDoesNotExist:
            # No object with that code: retry treating public_id as a pk.
            args = kwargs.copy()
            args['id'] = kwargs['public_id']
            args.pop('public_id')
            return super(CoreManager, self).get(**args)
    return super(CoreManager, self).get(**kwargs)

def get_or_create(

self, **kwargs)

def get_or_create(self, **kwargs):
    # Manager proxy: forward to the queryset implementation.
    return self.get_queryset().get_or_create(**kwargs)

def get_query_set(

*args, **kwargs)

def wrapped(*args, **kwargs):
    # Deprecation shim: `self` and `f` are closed over from the enclosing
    # rename wrapper (not visible here) -- warn on every call, then
    # delegate to the real implementation.
    warnings.warn(
        "`%s.%s` is deprecated, use `%s` instead." %
        (self.class_name, self.old_method_name, self.new_method_name),
        self.deprecation_warning, 2)
    return f(*args, **kwargs)

def get_queryset(

self)

Return location query set

def get_query_set(self):
    "Return location query set"
    # NOTE(review): old-style accessor name; newer Django code spells
    # this get_queryset() -- confirm against the deprecation shim.
    return LocationQuerySet(self.model)

def in_bulk(

self, *args, **kwargs)

def in_bulk(self, *args, **kwargs):
    # Manager proxy: forward to the queryset implementation.
    return self.get_queryset().in_bulk(*args, **kwargs)

def iterator(

self, *args, **kwargs)

def iterator(self, *args, **kwargs):
    # Manager proxy: forward to the queryset implementation.
    return self.get_queryset().iterator(*args, **kwargs)

def last(

self)

def last(self):
    # Manager proxy: forward to the queryset implementation.
    return self.get_queryset().last()

def latest(

self, *args, **kwargs)

def latest(self, *args, **kwargs):
    # Manager proxy: forward to the queryset implementation.
    return self.get_queryset().latest(*args, **kwargs)

def none(

self, *args, **kwargs)

def none(self, *args, **kwargs):
    "Return an empty result set"
    return self.get_query_set().none(*args, **kwargs)

def only(

self, *args, **kwargs)

def only(self, *args, **kwargs):
    # Manager proxy: forward to the queryset implementation.
    return self.get_queryset().only(*args, **kwargs)

def order_by(

self, *args, **kwargs)

def order_by(self, *args, **kwargs):
    # Manager proxy: forward to the queryset implementation.
    return self.get_queryset().order_by(*args, **kwargs)

def raw(

self, raw_query, params=None, *args, **kwargs)

def raw(self, raw_query, params=None, *args, **kwargs):
    # Wrap a raw SQL string in a RawQuerySet bound to this manager's
    # model and database alias.
    return RawQuerySet(raw_query=raw_query, model=self.model, params=params, using=self._db, *args, **kwargs)

def reverse(

self, *args, **kwargs)

def reverse(self, *args, **kwargs):
    # Manager proxy: forward to the queryset implementation.
    return self.get_queryset().reverse(*args, **kwargs)

def select_for_update(

self, *args, **kwargs)

def select_for_update(self, *args, **kwargs):
    # Manager proxy: forward to the queryset implementation.
    return self.get_queryset().select_for_update(*args, **kwargs)

def update(

self, *args, **kwargs)

def update(self, *args, **kwargs):
    # Manager proxy: forward to the queryset implementation.
    return self.get_queryset().update(*args, **kwargs)

def using(

self, *args, **kwargs)

def using(self, *args, **kwargs):
    # Manager proxy: forward to the queryset implementation.
    return self.get_queryset().using(*args, **kwargs)

def values(

self, *args, **kwargs)

def values(self, *args, **kwargs):
    # Manager proxy: forward to the queryset implementation.
    return self.get_queryset().values(*args, **kwargs)

def values_list(

self, *args, **kwargs)

def values_list(self, *args, **kwargs):
    # Manager proxy: forward to the queryset implementation.
    return self.get_queryset().values_list(*args, **kwargs)

class LocationQuerySet

class LocationQuerySet(CoreQuerySet):
    """Query set over Location objects with ASCII flat-name lookups."""

    # Lazily built cache mapping flat names to location ids, shared by
    # every instance of the class.
    __flatname_map = None

    def by_flatname(self, flatname):
        "Return the queryset filtered to the location with the given flat name."
        mapping = self.flatname_map()
        return self.filter(pk=mapping[flatname])

    def flatname_map(self):
        """Return (building once on first use) the flatname -> id cache.

        Only countries and continents are indexed.  Names are
        unaccented, lowercased and squeezed to [a-z_]; duplicate flat
        names are disambiguated with leading underscores.
        """
        if self.__class__.__flatname_map:
            return self.__class__.__flatname_map

        mapping = {}
        locations = self.filter(Q(type=self.model.COUNTRY) | Q(type=self.model.CONTINENT))
        for location in locations:
            flatname = unaccent(location.name).lower()
            flatname = re.sub('[^a-z]', '_', flatname)
            # dict.has_key() is gone in Python 3; `in` works everywhere.
            while flatname in mapping:
                flatname = '_' + flatname
            mapping[flatname] = location.id

        self.__class__.__flatname_map = mapping
        return mapping

    def current(self):
        "Locations referenced as a current location by rows of this query set."
        return self.filter(id__in=self.values_list('current_location_id', flat=True)).distinct()

Ancestors (in MRO)

  • LocationQuerySet
  • telemeta.models.core.CoreQuerySet
  • telemeta.models.core.EnhancedQuerySet
  • django.db.models.query.QuerySet
  • __builtin__.object

Class variables

var value_annotation

Instance variables

var db

Return the database that will be used if this query is executed now

var ordered

Returns True if the QuerySet is ordered -- i.e. has an order_by() clause or a default ordering on the model.

Methods

def __init__(

self, model=None, query=None, using=None)

def __init__(self, model=None, query=None, using=None):
    # model: the model class whose rows this queryset yields.
    self.model = model
    # _db: explicit database alias, or None to resolve at query time.
    self._db = using
    # Start from a fresh SQL query unless an existing one was supplied.
    self.query = query or sql.Query(self.model)
    self._result_cache = None
    self._sticky_filter = False
    self._for_write = False
    self._prefetch_related_lookups = []
    self._prefetch_done = False
    self._known_related_objects = {}        # {rel_field, {pk: rel_obj}}

def aggregate(

self, *args, **kwargs)

Returns a dictionary containing the calculations (aggregation) over the current queryset

If args is present the expression is passed as a kwarg using the Aggregate object's default alias.

def aggregate(self, *args, **kwargs):
    """
    Returns a dictionary containing the calculations (aggregation)
    over the current queryset

    If args is present the expression is passed as a kwarg using
    the Aggregate object's default alias.
    """
    if self.query.distinct_fields:
        raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
    # Positional aggregates are keyed by their default alias.
    for arg in args:
        kwargs[arg.default_alias] = arg
    # Work on a clone so this queryset's own query stays untouched.
    query = self.query.clone()
    for (alias, aggregate_expr) in kwargs.items():
        query.add_aggregate(aggregate_expr, self.model, alias,
            is_summary=True)
    return query.get_aggregation(using=self.db)

def all(

self)

Returns a new QuerySet that is a copy of the current one. This allows a QuerySet to proxy for a model manager in some cases.

def all(self):
    """
    Returns a new QuerySet that is a copy of the current one. This allows a
    QuerySet to proxy for a model manager in some cases.
    """
    # A plain clone: no filters are added or removed.
    return self._clone()

def annotate(

self, *args, **kwargs)

Return a query set in which the returned objects have been annotated with data aggregated from related fields.

def annotate(self, *args, **kwargs):
    """
    Return a query set in which the returned objects have been annotated
    with data aggregated from related fields.
    """
    # Key positional aggregates by their default alias, rejecting any
    # collision with an explicit keyword.
    for arg in args:
        if arg.default_alias in kwargs:
            raise ValueError("The named annotation '%s' conflicts with the "
                             "default name for another annotation."
                             % arg.default_alias)
        kwargs[arg.default_alias] = arg
    # An alias must not shadow a selected field or a model field.
    names = getattr(self, '_fields', None)
    if names is None:
        names = set(self.model._meta.get_all_field_names())
    for aggregate in kwargs:
        if aggregate in names:
            raise ValueError("The annotation '%s' conflicts with a field on "
                "the model." % aggregate)
    obj = self._clone()
    obj._setup_aggregate_query(list(kwargs))
    # Add the aggregates to the query
    for (alias, aggregate_expr) in kwargs.items():
        obj.query.add_aggregate(aggregate_expr, self.model, alias,
            is_summary=False)
    return obj

def bulk_create(

self, objs, batch_size=None)

Inserts each of the instances into the database. This does not call save() on each of the instances, does not send any pre/post save signals, and does not set the primary key attribute if it is an autoincrement field.

def bulk_create(self, objs, batch_size=None):
    """
    Inserts each of the instances into the database. This does *not* call
    save() on each of the instances, does not send any pre/post save
    signals, and does not set the primary key attribute if it is an
    autoincrement field.
    """
    # So this case is fun. When you bulk insert you don't get the primary
    # keys back (if it's an autoincrement), so you can't insert into the
    # child tables which references this. There are two workarounds, 1)
    # this could be implemented if you didn't have an autoincrement pk,
    # and 2) you could do it by doing O(n) normal inserts into the parent
    # tables to get the primary keys back, and then doing a single bulk
    # insert into the childmost table. Some databases might allow doing
    # this by using RETURNING clause for the insert query. We're punting
    # on these for now because they are relatively rare cases.
    # batch_size=None means "no explicit batching"; zero/negative sizes
    # are meaningless.
    assert batch_size is None or batch_size > 0
    # Multi-table inheritance would need the parent-table workaround
    # described above, so it is refused outright.
    if self.model._meta.parents:
        raise ValueError("Can't bulk create an inherited model")
    if not objs:
        return objs
    self._for_write = True
    connection = connections[self.db]
    fields = self.model._meta.local_concrete_fields
    with transaction.commit_on_success_unless_managed(using=self.db):
        # If the backend can mix rows with and without autoincrement pks
        # in a single INSERT, everything goes in one batched pass.
        if (connection.features.can_combine_inserts_with_and_without_auto_increment_pk
            and self.model._meta.has_auto_field):
            self._batched_insert(objs, fields, batch_size)
        else:
            # Otherwise split: rows with a pk keep all fields, rows
            # without drop the AutoField so the database assigns it.
            objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
            if objs_with_pk:
                self._batched_insert(objs_with_pk, fields, batch_size)
            if objs_without_pk:
                fields= [f for f in fields if not isinstance(f, AutoField)]
                self._batched_insert(objs_without_pk, fields, batch_size)
    return objs

def by_flatname(

self, flatname)

def by_flatname(self, flatname):
    """Look up a single record by its flat (ASCII, underscore) name."""
    mapping = self.flatname_map()
    return self.filter(pk=mapping[flatname])

def complex_filter(

self, filter_obj)

Returns a new QuerySet instance with filter_obj added to the filters.

filter_obj can be a Q object (or anything with an add_to_query() method) or a dictionary of keyword lookup arguments.

This exists to support framework features such as 'limit_choices_to', and usually it will be more natural to use other methods.

def complex_filter(self, filter_obj):
    """
    Return a new QuerySet with filter_obj added to the filters.

    filter_obj may be a Q object (or anything exposing add_to_query())
    or a dict of keyword lookups.  This exists mainly to support
    framework features such as 'limit_choices_to'; other methods are
    usually more natural.
    """
    if not (isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query')):
        # Plain dict of lookups: route through the normal filter path.
        return self._filter_or_exclude(None, **filter_obj)
    clone = self._clone()
    clone.query.add_q(filter_obj)
    return clone

def count(

self)

Performs a SELECT COUNT() and returns the number of records as an integer.

If the QuerySet is already fully cached this simply returns the length of the cached results set to avoid multiple SELECT COUNT(*) calls.

def count(self):
    """
    Return the number of matching records.  When the queryset has
    already been evaluated, the cached result length is used to avoid
    issuing a second SELECT COUNT(*).
    """
    if self._result_cache is None:
        return self.query.get_count(using=self.db)
    return len(self._result_cache)

def create(

self, **kwargs)

Creates a new object with the given kwargs, saving it to the database and returning the created object.

def create(self, **kwargs):
    """
    Instantiate self.model from kwargs, INSERT it into the database and
    return the new object.
    """
    instance = self.model(**kwargs)
    self._for_write = True
    instance.save(force_insert=True, using=self.db)
    return instance

def current(

self)

def current(self):
    """Restrict to rows whose id appears as some row's current_location_id."""
    current_ids = self.values_list('current_location_id', flat=True)
    return self.filter(id__in=current_ids).distinct()

def dates(

self, field_name, kind, order='ASC')

Returns a list of date objects representing all available dates for the given field_name, scoped to 'kind'.

def dates(self, field_name, kind, order='ASC'):
    """
    Return a DateQuerySet of the available dates for field_name at the
    requested granularity ('year', 'month' or 'day'), ordered 'ASC' or
    'DESC'.
    """
    assert kind in ("year", "month", "day"), "'kind' must be one of 'year', 'month' or 'day'."
    assert order in ('ASC', 'DESC'), "'order' must be either 'ASC' or 'DESC'."
    return self._clone(klass=DateQuerySet, setup=True, _field_name=field_name,
                       _kind=kind, _order=order)

def datetimes(

self, field_name, kind, order='ASC', tzinfo=None)

Returns a list of datetime objects representing all available datetimes for the given field_name, scoped to 'kind'.

def datetimes(self, field_name, kind, order='ASC', tzinfo=None):
    """
    Return a DateTimeQuerySet of the available datetimes for field_name
    at the requested granularity, ordered 'ASC' or 'DESC'.  Under
    settings.USE_TZ a missing tzinfo defaults to the current timezone;
    otherwise tzinfo is forced to None (naive datetimes).
    """
    assert kind in ("year", "month", "day", "hour", "minute", "second"), \
            "'kind' must be one of 'year', 'month', 'day', 'hour', 'minute' or 'second'."
    assert order in ('ASC', 'DESC'), \
            "'order' must be either 'ASC' or 'DESC'."
    if not settings.USE_TZ:
        tzinfo = None
    elif tzinfo is None:
        tzinfo = timezone.get_current_timezone()
    return self._clone(klass=DateTimeQuerySet, setup=True, _field_name=field_name,
                       _kind=kind, _order=order, _tzinfo=tzinfo)

def defer(

self, *fields)

Defers the loading of data for certain fields until they are accessed. The set of fields to defer is added to any existing set of deferred fields. The only exception to this is if None is passed in as the only parameter, in which case all deferrals are removed (None acts as a reset option).

def defer(self, *fields):
    """
    Defer loading of the named fields until they are first accessed.
    Deferred fields accumulate across calls; calling defer(None) resets
    and removes all deferrals.
    """
    clone = self._clone()
    if fields == (None,):
        clone.query.clear_deferred_loading()
        return clone
    clone.query.add_deferred_loading(fields)
    return clone

def delete(

self)

def delete(self):
    """
    Delete the selected records after cleaning up rows that reference
    them: rows related through a WeakForeignKey get that reference set
    to NULL, all other related rows are deleted.  Related rows are
    processed in chunks of CHUNK victim ids to bound the IN() clauses.
    """
    CHUNK=1024
    objects = self.model._meta.get_all_related_objects()
    ii = self.count()
    values = self.values_list('pk')
    for related in objects:
        i = 0
        while i < ii:
            # Primary keys of the victims for this chunk.
            ids = [v[0] for v in values[i:i + CHUNK]]
            filter = {related.field.name + '__pk__in': ids}
            q = related.model.objects.filter(**filter)
            if isinstance(related.field, WeakForeignKey):
                # Weak references survive the delete: detach instead.
                update = {related.field.name: None}
                q.update(**update)
            else:
                q.delete()
            i += CHUNK
    # NOTE(review): super() is anchored on EnhancedQuerySet explicitly,
    # which assumes this method runs on a direct subclass of it — confirm.
    super(EnhancedQuerySet, self).delete()

def distinct(

self, *field_names)

Returns a new QuerySet instance that will select only distinct results.

def distinct(self, *field_names):
    """
    Return a new QuerySet limited to distinct results, optionally
    restricted to the given field names.
    """
    assert self.query.can_filter(), \
            "Cannot create distinct fields once a slice has been taken."
    clone = self._clone()
    clone.query.add_distinct_fields(*field_names)
    return clone

def earliest(

self, field_name=None)

def earliest(self, field_name=None):
    """Return the earliest object according to field_name (direction '')."""
    return self._earliest_or_latest(field_name=field_name, direction="")

def exclude(

self, *args, **kwargs)

Returns a new QuerySet instance with NOT (args) ANDed to the existing set.

def exclude(self, *args, **kwargs):
    """Return a new QuerySet with NOT(args) ANDed to the existing filters."""
    return self._filter_or_exclude(True, *args, **kwargs)

def exists(

self)

def exists(self):
    """True when the queryset matches at least one row."""
    if self._result_cache is not None:
        # Already evaluated: no need to hit the database again.
        return bool(self._result_cache)
    return self.query.has_results(using=self.db)

def extra(

self, select=None, where=None, params=None, tables=None, order_by=None, select_params=None)

Adds extra SQL fragments to the query.

def extra(self, select=None, where=None, params=None, tables=None,
          order_by=None, select_params=None):
    """
    Attach raw SQL fragments (extra SELECT columns, WHERE clauses,
    tables and ORDER BY terms) to the query.
    """
    assert self.query.can_filter(), \
            "Cannot change a query once a slice has been taken"
    clone = self._clone()
    clone.query.add_extra(select, select_params, where,
                          params, tables, order_by)
    return clone

def filter(

self, *args, **kwargs)

Returns a new QuerySet instance with the args ANDed to the existing set.

def filter(self, *args, **kwargs):
    """Return a new QuerySet with the given lookups ANDed to the filters."""
    return self._filter_or_exclude(False, *args, **kwargs)

def first(

self)

Returns the first object of a query, returns None if no match is found.

def first(self):
    """Return the first object of the query, or None when there is no match."""
    ordered_qs = self if self.ordered else self.order_by('pk')
    try:
        return ordered_qs[0]
    except IndexError:
        return None

def flatname_map(

self)

def flatname_map(self):
    """
    Build — and cache on the class — a mapping of ASCII "flat" names to
    location ids, covering countries and continents only.

    Names are unaccented, lowercased, and every non-letter becomes an
    underscore; name collisions are disambiguated by prefixing
    underscores until the key is unique.
    """
    if self.__class__.__flatname_map:
        return self.__class__.__flatname_map
    mapping = {}  # renamed from 'map' to stop shadowing the builtin
    locations = self.filter(Q(type=self.model.COUNTRY) | Q(type=self.model.CONTINENT))
    for location in locations:
        flatname = unaccent(location.name).lower()
        flatname = re.sub('[^a-z]', '_', flatname)
        # 'in' replaces dict.has_key(), deprecated in Python 2 and
        # removed in Python 3.
        while flatname in mapping:
            flatname = '_' + flatname
        mapping[flatname] = location.id
    self.__class__.__flatname_map = mapping
    return mapping

def get(

self, *args, **kwargs)

Performs the query and returns a single object matching the given keyword arguments.

def get(self, *args, **kwargs):
    """
    Return the single object matching the given lookups.  Raises the
    model's DoesNotExist for zero matches and MultipleObjectsReturned
    when more than one row matches.
    """
    clone = self.filter(*args, **kwargs)
    if self.query.can_filter():
        clone = clone.order_by()
    matched = len(clone)
    if matched == 1:
        return clone._result_cache[0]
    if matched == 0:
        raise self.model.DoesNotExist(
            "%s matching query does not exist." %
            self.model._meta.object_name)
    raise self.model.MultipleObjectsReturned(
        "get() returned more than one %s -- it returned %s!" %
        (self.model._meta.object_name, matched))

def get_or_create(

self, **kwargs)

Looks up an object with the given kwargs, creating one if necessary. Returns a tuple of (object, created), where created is a boolean specifying whether an object was created.

def get_or_create(self, **kwargs):
    """
    Looks up an object with the given kwargs, creating one if necessary.
    Returns a tuple of (object, created), where created is a boolean
    specifying whether an object was created.
    """
    defaults = kwargs.pop('defaults', {})
    lookup = kwargs.copy()
    # Normalize attnames (e.g. 'author_id') to their field names so the
    # lookup matches regardless of how the caller spelled the field.
    for f in self.model._meta.fields:
        if f.attname in lookup:
            lookup[f.name] = lookup.pop(f.attname)
    try:
        self._for_write = True
        return self.get(**lookup), False
    except self.model.DoesNotExist:
        try:
            # Build the new instance from the plain (non '__' lookup)
            # kwargs, overlaid with the 'defaults' dict.
            params = dict((k, v) for k, v in kwargs.items() if LOOKUP_SEP not in k)
            params.update(defaults)
            obj = self.model(**params)
            with transaction.atomic(using=self.db):
                obj.save(force_insert=True, using=self.db)
            return obj, True
        except DatabaseError:
            # A concurrent writer may have inserted the row between our
            # get() and save(); retry the get before giving up.
            exc_info = sys.exc_info()
            try:
                return self.get(**lookup), False
            except self.model.DoesNotExist:
                # Re-raise the DatabaseError with its original traceback.
                six.reraise(*exc_info)

def in_bulk(

self, id_list)

Returns a dictionary mapping each of the given IDs to the object with that ID.

def in_bulk(self, id_list):
    """
    Return a dict mapping each given primary key to its object; ids with
    no matching row are simply absent from the result.
    """
    assert self.query.can_filter(), \
            "Cannot use 'limit' or 'offset' with in_bulk"
    if not id_list:
        return {}
    matches = self.filter(pk__in=id_list).order_by()
    return dict((obj._get_pk_val(), obj) for obj in matches)

def iterator(

self)

An iterator over the results from applying this QuerySet to the database.

def iterator(self):
    """
    An iterator over the results from applying this QuerySet to the
    database.
    """
    # fill_cache becomes the select_related spec (True/False/dict) when
    # the backend supports select_related at all.
    fill_cache = False
    if connections[self.db].features.supports_select_related:
        fill_cache = self.query.select_related
    if isinstance(fill_cache, dict):
        requested = fill_cache
    else:
        requested = None
    max_depth = self.query.max_depth
    extra_select = list(self.query.extra_select)
    aggregate_select = list(self.query.aggregate_select)
    only_load = self.query.get_loaded_field_names()
    if not fill_cache:
        fields = self.model._meta.concrete_fields
    load_fields = []
    # If only/defer clauses have been specified,
    # build the list of fields that are to be loaded.
    if only_load:
        for field, model in self.model._meta.get_concrete_fields_with_model():
            if model is None:
                model = self.model
            try:
                if field.name in only_load[model]:
                    # Add a field that has been explicitly included
                    load_fields.append(field.name)
            except KeyError:
                # Model wasn't explicitly listed in the only_load table
                # Therefore, we need to load all fields from this model
                load_fields.append(field.name)
    # Result rows are laid out as: extra columns, model columns,
    # aggregate columns — these offsets slice the row accordingly.
    index_start = len(extra_select)
    aggregate_start = index_start + len(load_fields or self.model._meta.concrete_fields)
    skip = None
    if load_fields and not fill_cache:
        # Some fields have been deferred, so we have to initialise
        # via keyword arguments.
        skip = set()
        init_list = []
        for field in fields:
            if field.name not in load_fields:
                skip.add(field.attname)
            else:
                init_list.append(field.attname)
        model_cls = deferred_class_factory(self.model, skip)
    # Cache db and model outside the loop
    db = self.db
    model = self.model
    compiler = self.query.get_compiler(using=db)
    if fill_cache:
        klass_info = get_klass_info(model, max_depth=max_depth,
                                    requested=requested, only_load=only_load)
    for row in compiler.results_iter():
        if fill_cache:
            # select_related: build the object graph from the joined row.
            obj, _ = get_cached_row(row, index_start, db, klass_info,
                                    offset=len(aggregate_select))
        else:
            # Omit aggregates in object creation.
            row_data = row[index_start:aggregate_start]
            if skip:
                obj = model_cls(**dict(zip(init_list, row_data)))
            else:
                obj = model(*row_data)
            # Store the source database of the object
            obj._state.db = db
            # This object came from the database; it's not being added.
            obj._state.adding = False
        if extra_select:
            for i, k in enumerate(extra_select):
                setattr(obj, k, row[i])
        # Add the aggregates to the model
        if aggregate_select:
            for i, aggregate in enumerate(aggregate_select):
                setattr(obj, aggregate, row[i + aggregate_start])
        # Add the known related objects to the model, if there are any
        if self._known_related_objects:
            for field, rel_objs in self._known_related_objects.items():
                # Avoid overwriting objects loaded e.g. by select_related
                if hasattr(obj, field.get_cache_name()):
                    continue
                pk = getattr(obj, field.get_attname())
                try:
                    rel_obj = rel_objs[pk]
                except KeyError:
                    pass               # may happen in qs1 | qs2 scenarios
                else:
                    setattr(obj, field.name, rel_obj)
        yield obj

def last(

self)

Returns the last object of a query, returns None if no match is found.

def last(self):
    """Return the last object of the query, or None when there is no match."""
    ordered_qs = self.reverse() if self.ordered else self.order_by('-pk')
    try:
        return ordered_qs[0]
    except IndexError:
        return None

def latest(

self, field_name=None)

def latest(self, field_name=None):
    """Return the latest object according to field_name (direction '-')."""
    return self._earliest_or_latest(field_name=field_name, direction="-")

def none(

self)

Return an empty result set

def none(self): # redundant with none() in recent Django svn
    """Return an empty result set via an always-false WHERE clause."""
    return self.extra(where = ["0 = 1"])

def only(

self, *fields)

Essentially, the opposite of defer. Only the fields passed into this method and that are not already specified as deferred are loaded immediately when the queryset is evaluated.

def only(self, *fields):
    """
    Opposite of defer(): load exactly these fields (minus any already
    deferred ones) immediately when the queryset is evaluated.  None is
    a defer()-only reset value, so only(None) raises TypeError.
    """
    if fields == (None,):
        # Can only pass None to defer(), not only(), as the rest option.
        # That won't stop people trying to do this, so let's be explicit.
        raise TypeError("Cannot pass None as an argument to only().")
    result = self._clone()
    result.query.add_immediate_loading(fields)
    return result

def order_by(

self, *field_names)

Returns a new QuerySet instance with the ordering changed.

def order_by(self, *field_names):
    """Return a new QuerySet ordered by the given field names."""
    assert self.query.can_filter(), \
            "Cannot reorder a query once a slice has been taken."
    clone = self._clone()
    clone.query.clear_ordering(force_empty=False)
    clone.query.add_ordering(*field_names)
    return clone

Returns a new QuerySet instance that will prefetch the specified Many-To-One and Many-To-Many related objects when the QuerySet is evaluated.

When prefetch_related() is called more than once, the list of lookups to prefetch is appended to. If prefetch_related(None) is called, the list is cleared.

def reverse(

self)

Reverses the ordering of the QuerySet.

def reverse(self):
    """Return a clone with its ordering direction flipped."""
    flipped = self._clone()
    flipped.query.standard_ordering = not flipped.query.standard_ordering
    return flipped

def select_for_update(

self, **kwargs)

Returns a new QuerySet instance that will select objects with a FOR UPDATE lock.

def select_for_update(self, **kwargs):
    """
    Return a queryset whose rows will be locked with SELECT ... FOR
    UPDATE.  Pass nowait=True to request a non-blocking lock.
    """
    nowait = kwargs.pop('nowait', False)  # default: wait for the lock
    clone = self._clone()
    clone._for_write = True
    clone.query.select_for_update = True
    clone.query.select_for_update_nowait = nowait
    return clone

Returns a new QuerySet instance that will select related objects.

If fields are specified, they must be ForeignKey fields and only those related objects are included in the selection.

If select_related(None) is called, the list is cleared.

def update(

self, **kwargs)

Updates all elements in the current QuerySet, setting all the given fields to the appropriate values.

def update(self, **kwargs):
    """
    Update every row matched by this QuerySet, setting the given fields;
    return the number of rows affected.  The local result cache is
    invalidated afterwards.
    """
    assert self.query.can_filter(), \
            "Cannot update a query once a slice has been taken."
    self._for_write = True
    update_query = self.query.clone(sql.UpdateQuery)
    update_query.add_update_values(kwargs)
    with transaction.commit_on_success_unless_managed(using=self.db):
        rows = update_query.get_compiler(self.db).execute_sql(None)
    self._result_cache = None
    return rows

def using(

self, alias)

Selects which database this QuerySet should execute its query against.

def using(self, alias):
    """Pin this QuerySet to the database identified by 'alias'."""
    pinned = self._clone()
    pinned._db = alias
    return pinned

def values(

self, *fields)

def values(self, *fields):
    """Return a ValuesQuerySet yielding dicts limited to the given fields."""
    return self._clone(klass=ValuesQuerySet, setup=True, _fields=fields)

def values_list(

self, *fields, **kwargs)

def values_list(self, *fields, **kwargs):
    """
    Return a ValuesListQuerySet of tuples — or of bare values when
    flat=True, which is only legal with a single field.
    """
    flat = kwargs.pop('flat', False)
    if kwargs:
        raise TypeError('Unexpected keyword arguments to values_list: %s'
                % (list(kwargs),))
    if flat and len(fields) > 1:
        raise TypeError("'flat' is not valid when values_list is called with more than one field.")
    return self._clone(klass=ValuesListQuerySet, setup=True,
                       flat=flat, _fields=fields)

class MediaCollectionManager

Manage collection queries

class MediaCollectionManager(CoreManager):
    "Manage collection queries"

    def get_query_set(self):
        "Return the collection query"
        return MediaCollectionQuerySet(self.model)

    def enriched(self):
        "Query set with additional virtual fields such as apparent_collector"
        return self.get_query_set().virtual('apparent_collector')

    # The methods below just delegate to MediaCollectionQuerySet; each
    # copies the queryset method's docstring onto the manager method so
    # introspection shows the real documentation.

    def quick_search(self, *args, **kwargs):
        return self.get_query_set().quick_search(*args, **kwargs)
    quick_search.__doc__ = MediaCollectionQuerySet.quick_search.__doc__

    def by_location(self, *args, **kwargs):
        return self.get_query_set().by_location(*args, **kwargs)
    by_location.__doc__ = MediaCollectionQuerySet.by_location.__doc__

    def by_recording_year(self, *args, **kwargs):
        return self.get_query_set().by_recording_year(*args, **kwargs)
    by_recording_year.__doc__ = MediaCollectionQuerySet.by_recording_year.__doc__

    def by_publish_year(self, *args, **kwargs):
        return self.get_query_set().by_publish_year(*args, **kwargs)
    by_publish_year.__doc__ = MediaCollectionQuerySet.by_publish_year.__doc__

    def by_ethnic_group(self, *args, **kwargs):
        return self.get_query_set().by_ethnic_group(*args, **kwargs)
    by_ethnic_group.__doc__ = MediaCollectionQuerySet.by_ethnic_group.__doc__

    def by_change_time(self, *args, **kwargs):
        return self.get_query_set().by_change_time(*args, **kwargs)
    by_change_time.__doc__ = MediaCollectionQuerySet.by_change_time.__doc__

    # Accent-insensitive name comparator (cmp-style, Python 2).
    @staticmethod
    def __name_cmp(obj1, obj2):
        return unaccent_icmp(obj1.name, obj2.name)

    def sound(self, *args, **kwargs):
        return self.get_query_set().sound(*args, **kwargs)
    sound.__doc__ = MediaCollectionQuerySet.sound.__doc__

    def by_instrument(self, *args, **kwargs):
        return self.get_query_set().by_instrument(*args, **kwargs)
    by_instrument.__doc__ = MediaCollectionQuerySet.by_instrument.__doc__

Ancestors (in MRO)

  • MediaCollectionManager
  • telemeta.models.core.CoreManager
  • telemeta.models.core.EnhancedManager
  • django.db.models.manager.Manager
  • __builtin__.object

Class variables

var creation_counter

Instance variables

var db

Methods

def __init__(

self)

def __init__(self):
    """Initialize manager bookkeeping; the model is bound later by
    contribute_to_class()."""
    super(Manager, self).__init__()
    self._set_creation_counter()
    self.model = None        # set when attached to a model class
    self._inherited = False  # becomes True on managers copied into subclasses
    self._db = None          # explicit database alias, if any

def aggregate(

self, *args, **kwargs)

def aggregate(self, *args, **kwargs):
    """Proxy aggregate() to a fresh queryset from this manager."""
    qs = self.get_queryset()
    return qs.aggregate(*args, **kwargs)

def all(

self)

def all(self):
    """Return a plain queryset covering everything this manager sees."""
    qs = self.get_queryset()
    return qs

def annotate(

self, *args, **kwargs)

def annotate(self, *args, **kwargs):
    """Proxy annotate() to a fresh queryset from this manager."""
    qs = self.get_queryset()
    return qs.annotate(*args, **kwargs)

def bulk_create(

self, *args, **kwargs)

def bulk_create(self, *args, **kwargs):
    """Proxy bulk_create() to a fresh queryset from this manager."""
    qs = self.get_queryset()
    return qs.bulk_create(*args, **kwargs)

def by_change_time(

self, *args, **kwargs)

Find collections between two dates

def by_change_time(self, *args, **kwargs):
    """Proxy by_change_time() to the collection queryset."""
    qs = self.get_query_set()
    return qs.by_change_time(*args, **kwargs)

def by_ethnic_group(

self, *args, **kwargs)

Find collections by ethnic group

def by_ethnic_group(self, *args, **kwargs):
    """Proxy by_ethnic_group() to the collection queryset."""
    qs = self.get_query_set()
    return qs.by_ethnic_group(*args, **kwargs)

def by_instrument(

self, *args, **kwargs)

Find collections by instrument

def by_instrument(self, *args, **kwargs):
    """Proxy by_instrument() to the collection queryset."""
    qs = self.get_query_set()
    return qs.by_instrument(*args, **kwargs)

def by_location(

self, *args, **kwargs)

Find collections by location

def by_location(self, *args, **kwargs):
    """Proxy by_location() to the collection queryset."""
    qs = self.get_query_set()
    return qs.by_location(*args, **kwargs)

def by_publish_year(

self, *args, **kwargs)

Find collections by publishing year

def by_publish_year(self, *args, **kwargs):
    """Proxy by_publish_year() to the collection queryset."""
    qs = self.get_query_set()
    return qs.by_publish_year(*args, **kwargs)

def by_recording_year(

self, *args, **kwargs)

Find collections by recording year

def by_recording_year(self, *args, **kwargs):
    """Proxy by_recording_year() to the collection queryset."""
    qs = self.get_query_set()
    return qs.by_recording_year(*args, **kwargs)

def complex_filter(

self, *args, **kwargs)

def complex_filter(self, *args, **kwargs):
    """Proxy complex_filter() to a fresh queryset from this manager."""
    qs = self.get_queryset()
    return qs.complex_filter(*args, **kwargs)

def contribute_to_class(

self, model, name)

def contribute_to_class(self, model, name):
    """
    Attach this manager to 'model' under attribute 'name', installing
    the appropriate descriptor and registering the manager with the
    model's _meta bookkeeping.
    """
    # TODO: Use weakref because of possible memory leak / circular reference.
    self.model = model
    # Only contribute the manager if the model is concrete
    if model._meta.abstract:
        setattr(model, name, AbstractManagerDescriptor(model))
    elif model._meta.swapped:
        setattr(model, name, SwappedManagerDescriptor(model))
    else:
    # if not model._meta.abstract and not model._meta.swapped:
        setattr(model, name, ManagerDescriptor(self))
    # The earliest-declared manager (lowest creation_counter) becomes
    # the model's _default_manager.
    if not getattr(model, '_default_manager', None) or self.creation_counter < model._default_manager.creation_counter:
        model._default_manager = self
    # Abstract models (and inherited managers on non-proxy models) are
    # tracked separately from concrete managers.
    if model._meta.abstract or (self._inherited and not self.model._meta.proxy):
        model._meta.abstract_managers.append((self.creation_counter, name,
                self))
    else:
        model._meta.concrete_managers.append((self.creation_counter, name,
            self))

def count(

self)

def count(self):
    """Proxy count() to a fresh queryset from this manager."""
    qs = self.get_queryset()
    return qs.count()

def create(

self, **kwargs)

def create(self, **kwargs):
    """Proxy create() to a fresh queryset from this manager."""
    qs = self.get_queryset()
    return qs.create(**kwargs)

def dates(

self, *args, **kwargs)

def dates(self, *args, **kwargs):
    """Proxy dates() to a fresh queryset from this manager."""
    qs = self.get_queryset()
    return qs.dates(*args, **kwargs)

def datetimes(

self, *args, **kwargs)

def datetimes(self, *args, **kwargs):
    """Proxy datetimes() to a fresh queryset from this manager."""
    qs = self.get_queryset()
    return qs.datetimes(*args, **kwargs)

def db_manager(

self, using)

def db_manager(self, using):
    """Return a shallow copy of this manager pinned to the 'using' alias."""
    pinned = copy.copy(self)
    pinned._db = using
    return pinned

def defer(

self, *args, **kwargs)

def defer(self, *args, **kwargs):
    """Proxy defer() to a fresh queryset from this manager."""
    qs = self.get_queryset()
    return qs.defer(*args, **kwargs)

def distinct(

self, *args, **kwargs)

def distinct(self, *args, **kwargs):
    """Proxy distinct() to a fresh queryset from this manager."""
    qs = self.get_queryset()
    return qs.distinct(*args, **kwargs)

def earliest(

self, *args, **kwargs)

def earliest(self, *args, **kwargs):
    """Proxy earliest() to a fresh queryset from this manager."""
    qs = self.get_queryset()
    return qs.earliest(*args, **kwargs)

def enriched(

self)

Query set with additional virtual fields such as apparent_collector

def enriched(self):
    "Query set with additional virtual fields such as apparent_collector"
    qs = self.get_query_set()
    return qs.virtual('apparent_collector')

def exclude(

self, *args, **kwargs)

def exclude(self, *args, **kwargs):
    """Proxy exclude() to a fresh queryset from this manager."""
    qs = self.get_queryset()
    return qs.exclude(*args, **kwargs)

def exists(

self, *args, **kwargs)

def exists(self, *args, **kwargs):
    """Proxy exists() to a fresh queryset from this manager."""
    qs = self.get_queryset()
    return qs.exists(*args, **kwargs)

def extra(

self, *args, **kwargs)

def extra(self, *args, **kwargs):
    """Proxy extra() to a fresh queryset from this manager."""
    qs = self.get_queryset()
    return qs.extra(*args, **kwargs)

def filter(

self, *args, **kwargs)

def filter(self, *args, **kwargs):
    """Proxy filter() to a fresh queryset from this manager."""
    qs = self.get_queryset()
    return qs.filter(*args, **kwargs)

def first(

self)

def first(self):
    """Proxy first() to a fresh queryset from this manager."""
    qs = self.get_queryset()
    return qs.first()

def get(

self, **kwargs)

def get(self, **kwargs):
    """
    Fetch a single object.  A 'public_id' kwarg is resolved first as
    the object's 'code', then — if no such code exists — as its 'id'.
    """
    # 'in' replaces dict.has_key(), deprecated in Python 2 and removed
    # in Python 3.
    if 'public_id' in kwargs:
        lookup = kwargs.copy()
        lookup['code'] = lookup.pop('public_id')
        try:
            return super(CoreManager, self).get(**lookup)
        except ObjectDoesNotExist:
            lookup = kwargs.copy()
            lookup['id'] = lookup.pop('public_id')
            return super(CoreManager, self).get(**lookup)
    return super(CoreManager, self).get(**kwargs)

def get_or_create(

self, **kwargs)

def get_or_create(self, **kwargs):
    """Proxy get_or_create() to a fresh queryset from this manager."""
    qs = self.get_queryset()
    return qs.get_or_create(**kwargs)

def get_query_set(

*args, **kwargs)

def wrapped(*args, **kwargs):
    """Deprecation shim: warn that the old method name is deprecated,
    then call the renamed implementation.

    NOTE(review): this is the inner function of a rename-deprecation
    decorator; 'self' (the deprecation descriptor) and 'f' (the real
    method) are closure variables from the enclosing scope, which is
    not visible here.
    """
    warnings.warn(
        "`%s.%s` is deprecated, use `%s` instead." %
        (self.class_name, self.old_method_name, self.new_method_name),
        self.deprecation_warning, 2)
    return f(*args, **kwargs)

def get_queryset(

self)

Return the collection query

def get_query_set(self):
    "Return the collection query"
    queryset = MediaCollectionQuerySet(self.model)
    return queryset

def in_bulk(

self, *args, **kwargs)

def in_bulk(self, *args, **kwargs):
    """Proxy in_bulk() to a fresh queryset from this manager."""
    qs = self.get_queryset()
    return qs.in_bulk(*args, **kwargs)

def iterator(

self, *args, **kwargs)

def iterator(self, *args, **kwargs):
    """Proxy iterator() to a fresh queryset from this manager."""
    qs = self.get_queryset()
    return qs.iterator(*args, **kwargs)

def last(

self)

def last(self):
    """Proxy last() to a fresh queryset from this manager."""
    qs = self.get_queryset()
    return qs.last()

def latest(

self, *args, **kwargs)

def latest(self, *args, **kwargs):
    """Proxy latest() to a fresh queryset from this manager."""
    qs = self.get_queryset()
    return qs.latest(*args, **kwargs)

def none(

self, *args, **kwargs)

def none(self, *args, **kwargs):
    """Return an empty queryset (delegates to the queryset's none())."""
    qs = self.get_query_set()
    return qs.none(*args, **kwargs)

def only(

self, *args, **kwargs)

def only(self, *args, **kwargs):
    """Proxy only() to a fresh queryset from this manager."""
    qs = self.get_queryset()
    return qs.only(*args, **kwargs)

def order_by(

self, *args, **kwargs)

def order_by(self, *args, **kwargs):
    """Proxy order_by() to a fresh queryset from this manager."""
    qs = self.get_queryset()
    return qs.order_by(*args, **kwargs)

Perform a quick search on code, title and collector name

def raw(

self, raw_query, params=None, *args, **kwargs)

def raw(self, raw_query, params=None, *args, **kwargs):
    """Return a RawQuerySet over this manager's model for the given raw SQL."""
    return RawQuerySet(raw_query=raw_query, model=self.model,
                       params=params, using=self._db, *args, **kwargs)

def reverse(

self, *args, **kwargs)

def reverse(self, *args, **kwargs):
    """Proxy reverse() to a fresh queryset from this manager."""
    qs = self.get_queryset()
    return qs.reverse(*args, **kwargs)

def select_for_update(

self, *args, **kwargs)

def select_for_update(self, *args, **kwargs):
    """Proxy select_for_update() to a fresh queryset from this manager."""
    qs = self.get_queryset()
    return qs.select_for_update(*args, **kwargs)

def sound(

self, *args, **kwargs)

def sound(self, *args, **kwargs):
    """Proxy sound() to the collection queryset."""
    qs = self.get_query_set()
    return qs.sound(*args, **kwargs)

def update(

self, *args, **kwargs)

def update(self, *args, **kwargs):
    """Proxy update() to a fresh queryset from this manager."""
    qs = self.get_queryset()
    return qs.update(*args, **kwargs)

def using(

self, *args, **kwargs)

def using(self, *args, **kwargs):
    """Proxy using() to a fresh queryset from this manager."""
    qs = self.get_queryset()
    return qs.using(*args, **kwargs)

def values(

self, *args, **kwargs)

def values(self, *args, **kwargs):
    """Proxy values() to a fresh queryset from this manager."""
    qs = self.get_queryset()
    return qs.values(*args, **kwargs)

def values_list(

self, *args, **kwargs)

def values_list(self, *args, **kwargs):
    """Proxy values_list() to a fresh queryset from this manager."""
    qs = self.get_queryset()
    return qs.values_list(*args, **kwargs)

class MediaCollectionQuerySet

class MediaCollectionQuerySet(CoreQuerySet):
    "Base class for all media collection query sets"

    def quick_search(self, pattern):
        "Perform a quick search on code, title and collector name"
        from telemeta.models.collection import MediaCollection
        pattern = pattern.strip()
        mod = MediaCollection()
        fields = mod.to_dict()
        # Start from the fuzzy collector/creator match, then OR in a word
        # search on every text-like field of the model.
        q = self.by_fuzzy_collector_q(pattern)
        for field in fields.keys():
            field_str = str(mod._meta.get_field(field))
            if 'CharField' in field_str or 'TextField' in field_str:
                q |= word_search_q(field, pattern)
        return self.filter(q)

    def by_location(self, location):
        "Find collections by location (including its apparented locations)"
        return self.filter(items__location__in=location.apparented()).distinct()

    def by_recording_year(self, from_year, to_year=None):
        """Find collections by recording year.

        With a single year, match collections whose recording span covers it;
        with a range, match collections whose span overlaps the range.
        """
        if to_year is None:
            return (self.filter(recorded_from_year__lte=from_year, recorded_to_year__gte=from_year))
        else:
            return (self.filter(Q(recorded_from_year__range=(from_year, to_year)) |
                    Q(recorded_to_year__range=(from_year, to_year))))

    def by_publish_year(self, from_year, to_year=None):
        "Find collections by publishing year (single year or inclusive range)"
        if to_year is None:
            to_year = from_year
        return self.filter(year_published__range=(from_year, to_year))

    def by_ethnic_group(self, group):
        "Find collections by ethnic group"
        return self.filter(items__ethnic_group=group).distinct()

    def by_change_time(self, from_time=None, until_time=None):
        "Find collections between two dates"
        return self._by_change_time('collection', from_time, until_time)

    def virtual(self, *args):
        """Annotate the query set with computed pseudo-fields.

        Only 'apparent_collector' is supported; it resolves to the creator or
        the collector depending on the collector_is_creator flag.

        Raises ValueError on an unknown field name.
        """
        qs = self
        for f in args:
            if f == 'apparent_collector':
                # IF() is MySQL-specific; skip the annotation on backends
                # that do not support it.
                if 'sqlite3' not in engine and 'postgresql_psycopg2' not in engine:
                    qs = qs.extra(select={f: 'IF(media_collections.collector_is_creator, '
                                         'media_collections.creator, media_collections.collector)'})
            else:
                raise ValueError("Unsupported virtual field: %s" % f)

        return qs

    def recording_year_range(self):
        """Return the (min, max) recording years over the query set.

        Zero/unset years are excluded from the minimum; if only one bound is
        available it is used for both ends.
        """
        from_max = self.aggregate(Max('recorded_from_year'))['recorded_from_year__max']
        to_max   = self.aggregate(Max('recorded_to_year'))['recorded_to_year__max']
        year_max = max(from_max, to_max)

        from_min = self.filter(recorded_from_year__gt=0).aggregate(Min('recorded_from_year'))['recorded_from_year__min']
        to_min   = self.filter(recorded_to_year__gt=0).aggregate(Min('recorded_to_year'))['recorded_to_year__min']
        year_min = min(from_min, to_min)

        if not year_max:
            year_max = year_min
        elif not year_min:
            year_min = year_max

        return year_min, year_max

    def publishing_year_range(self):
        "Return the (min, max) publishing years over the query set (zero years excluded from the minimum)"
        year_max = self.aggregate(Max('year_published'))['year_published__max']
        year_min = self.filter(year_published__gt=0).aggregate(Min('year_published'))['year_published__min']

        return year_min, year_max

    @staticmethod
    def by_fuzzy_collector_q(pattern):
        "Build a Q matching the pattern against creator or collector"
        return word_search_q('creator', pattern) | word_search_q('collector', pattern)

    def by_fuzzy_collector(self, pattern):
        "Find collections whose creator or collector matches the pattern"
        return self.filter(self.by_fuzzy_collector_q(pattern))

    def sound(self):
        "Find collections that have at least one item with a sound file or URL"
        return self.filter(Q(items__file__contains='/') | Q(items__url__contains='/')).distinct()

    def by_instrument(self, name):
        "Find collections by instrument name or instrument alias (case-insensitive substring)"
        from telemeta.models.item import MediaItemPerformance
        from telemeta.models.instrument import Instrument, InstrumentAlias
        instruments = Instrument.objects.filter(name__icontains=name)
        aliases = InstrumentAlias.objects.filter(name__icontains=name)
        performances = MediaItemPerformance.objects.filter(Q(instrument__in=instruments) | Q(alias__in=aliases))
        items = [performance.media_item for performance in performances]
        return self.filter(items__in=items).distinct()

Ancestors (in MRO)

  • MediaCollectionQuerySet
  • telemeta.models.core.CoreQuerySet
  • telemeta.models.core.EnhancedQuerySet
  • django.db.models.query.QuerySet
  • __builtin__.object

Class variables

var value_annotation

Static methods

def by_fuzzy_collector_q(

pattern)

@staticmethod
def by_fuzzy_collector_q(pattern):
    return word_search_q('creator', pattern) | word_search_q('collector', pattern)

Instance variables

var db

Return the database that will be used if this query is executed now

var ordered

Returns True if the QuerySet is ordered -- i.e. has an order_by() clause or a default ordering on the model.

Methods

def __init__(

self, model=None, query=None, using=None)

def __init__(self, model=None, query=None, using=None):
    self.model = model
    self._db = using
    self.query = query or sql.Query(self.model)
    self._result_cache = None
    self._sticky_filter = False
    self._for_write = False
    self._prefetch_related_lookups = []
    self._prefetch_done = False
    self._known_related_objects = {}        # {rel_field, {pk: rel_obj}}

def aggregate(

self, *args, **kwargs)

Returns a dictionary containing the calculations (aggregation) over the current queryset

If args is present the expression is passed as a kwarg using the Aggregate object's default alias.

def aggregate(self, *args, **kwargs):
    """
    Returns a dictionary containing the calculations (aggregation)
    over the current queryset
    If args is present the expression is passed as a kwarg using
    the Aggregate object's default alias.
    """
    if self.query.distinct_fields:
        raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
    for arg in args:
        kwargs[arg.default_alias] = arg
    query = self.query.clone()
    for (alias, aggregate_expr) in kwargs.items():
        query.add_aggregate(aggregate_expr, self.model, alias,
            is_summary=True)
    return query.get_aggregation(using=self.db)

def all(

self)

Returns a new QuerySet that is a copy of the current one. This allows a QuerySet to proxy for a model manager in some cases.

def all(self):
    """
    Returns a new QuerySet that is a copy of the current one. This allows a
    QuerySet to proxy for a model manager in some cases.
    """
    return self._clone()

def annotate(

self, *args, **kwargs)

Return a query set in which the returned objects have been annotated with data aggregated from related fields.

def annotate(self, *args, **kwargs):
    """
    Return a query set in which the returned objects have been annotated
    with data aggregated from related fields.
    """
    for arg in args:
        if arg.default_alias in kwargs:
            raise ValueError("The named annotation '%s' conflicts with the "
                             "default name for another annotation."
                             % arg.default_alias)
        kwargs[arg.default_alias] = arg
    names = getattr(self, '_fields', None)
    if names is None:
        names = set(self.model._meta.get_all_field_names())
    for aggregate in kwargs:
        if aggregate in names:
            raise ValueError("The annotation '%s' conflicts with a field on "
                "the model." % aggregate)
    obj = self._clone()
    obj._setup_aggregate_query(list(kwargs))
    # Add the aggregates to the query
    for (alias, aggregate_expr) in kwargs.items():
        obj.query.add_aggregate(aggregate_expr, self.model, alias,
            is_summary=False)
    return obj

def bulk_create(

self, objs, batch_size=None)

Inserts each of the instances into the database. This does not call save() on each of the instances, does not send any pre/post save signals, and does not set the primary key attribute if it is an autoincrement field.

def bulk_create(self, objs, batch_size=None):
    """
    Inserts each of the instances into the database. This does *not* call
    save() on each of the instances, does not send any pre/post save
    signals, and does not set the primary key attribute if it is an
    autoincrement field.
    """
    # So this case is fun. When you bulk insert you don't get the primary
    # keys back (if it's an autoincrement), so you can't insert into the
    # child tables which references this. There are two workarounds, 1)
    # this could be implemented if you didn't have an autoincrement pk,
    # and 2) you could do it by doing O(n) normal inserts into the parent
    # tables to get the primary keys back, and then doing a single bulk
    # insert into the childmost table. Some databases might allow doing
    # this by using RETURNING clause for the insert query. We're punting
    # on these for now because they are relatively rare cases.
    assert batch_size is None or batch_size > 0
    if self.model._meta.parents:
        raise ValueError("Can't bulk create an inherited model")
    if not objs:
        return objs
    self._for_write = True
    connection = connections[self.db]
    fields = self.model._meta.local_concrete_fields
    with transaction.commit_on_success_unless_managed(using=self.db):
        if (connection.features.can_combine_inserts_with_and_without_auto_increment_pk
            and self.model._meta.has_auto_field):
            self._batched_insert(objs, fields, batch_size)
        else:
            objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
            if objs_with_pk:
                self._batched_insert(objs_with_pk, fields, batch_size)
            if objs_without_pk:
                fields= [f for f in fields if not isinstance(f, AutoField)]
                self._batched_insert(objs_without_pk, fields, batch_size)
    return objs

def by_change_time(

self, from_time=None, until_time=None)

Find collections between two dates

def by_change_time(self, from_time=None, until_time=None):
    "Find collections between two dates"
    return self._by_change_time('collection', from_time, until_time)

def by_ethnic_group(

self, group)

Find collections by ethnic group

def by_ethnic_group(self, group):
    "Find collections by ethnic group"
    return self.filter(items__ethnic_group=group).distinct()

def by_fuzzy_collector(

self, pattern)

def by_fuzzy_collector(self, pattern):
    return self.filter(self.by_fuzzy_collector_q(pattern))

def by_instrument(

self, name)

Find collections by instrument

def by_instrument(self, name):
    "Find collections by instrument"
    from telemeta.models.item import MediaItemPerformance
    from telemeta.models.instrument import Instrument, InstrumentAlias
    instruments = Instrument.objects.filter(name__icontains=name)
    aliases = InstrumentAlias.objects.filter(name__icontains=name)
    items = []
    performances = MediaItemPerformance.objects.filter(Q(instrument__in=instruments) | Q(alias__in=aliases))
    for performance in performances:
        items.append(performance.media_item)
    return self.filter(items__in=items).distinct()

def by_location(

self, location)

Find collections by location

def by_location(self, location):
    "Find collections by location"
    return self.filter(items__location__in=location.apparented()).distinct()

def by_publish_year(

self, from_year, to_year=None)

Find collections by publishing year

def by_publish_year(self, from_year, to_year=None):
    "Find collections by publishing year"
    if to_year is None:
        to_year = from_year
    return self.filter(year_published__range=(from_year, to_year))

def by_recording_year(

self, from_year, to_year=None)

Find collections by recording year

def by_recording_year(self, from_year, to_year=None):
    "Find collections by recording year"
    if to_year is None:
        return (self.filter(recorded_from_year__lte=from_year, recorded_to_year__gte=from_year))
    else:
        return (self.filter(Q(recorded_from_year__range=(from_year, to_year)) |
                Q(recorded_to_year__range=(from_year, to_year))))

def complex_filter(

self, filter_obj)

Returns a new QuerySet instance with filter_obj added to the filters.

filter_obj can be a Q object (or anything with an add_to_query() method) or a dictionary of keyword lookup arguments.

This exists to support framework features such as 'limit_choices_to', and usually it will be more natural to use other methods.

def complex_filter(self, filter_obj):
    """
    Returns a new QuerySet instance with filter_obj added to the filters.
    filter_obj can be a Q object (or anything with an add_to_query()
    method) or a dictionary of keyword lookup arguments.
    This exists to support framework features such as 'limit_choices_to',
    and usually it will be more natural to use other methods.
    """
    if isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query'):
        clone = self._clone()
        clone.query.add_q(filter_obj)
        return clone
    else:
        return self._filter_or_exclude(None, **filter_obj)

def count(

self)

Performs a SELECT COUNT() and returns the number of records as an integer.

If the QuerySet is already fully cached this simply returns the length of the cached results set to avoid multiple SELECT COUNT(*) calls.

def count(self):
    """
    Performs a SELECT COUNT() and returns the number of records as an
    integer.
    If the QuerySet is already fully cached this simply returns the length
    of the cached results set to avoid multiple SELECT COUNT(*) calls.
    """
    if self._result_cache is not None:
        return len(self._result_cache)
    return self.query.get_count(using=self.db)

def create(

self, **kwargs)

Creates a new object with the given kwargs, saving it to the database and returning the created object.

def create(self, **kwargs):
    """
    Creates a new object with the given kwargs, saving it to the database
    and returning the created object.
    """
    obj = self.model(**kwargs)
    self._for_write = True
    obj.save(force_insert=True, using=self.db)
    return obj

def dates(

self, field_name, kind, order='ASC')

Returns a list of date objects representing all available dates for the given field_name, scoped to 'kind'.

def dates(self, field_name, kind, order='ASC'):
    """
    Returns a list of date objects representing all available dates for
    the given field_name, scoped to 'kind'.
    """
    assert kind in ("year", "month", "day"), \
            "'kind' must be one of 'year', 'month' or 'day'."
    assert order in ('ASC', 'DESC'), \
            "'order' must be either 'ASC' or 'DESC'."
    return self._clone(klass=DateQuerySet, setup=True,
            _field_name=field_name, _kind=kind, _order=order)

def datetimes(

self, field_name, kind, order='ASC', tzinfo=None)

Returns a list of datetime objects representing all available datetimes for the given field_name, scoped to 'kind'.

def datetimes(self, field_name, kind, order='ASC', tzinfo=None):
    """
    Returns a list of datetime objects representing all available
    datetimes for the given field_name, scoped to 'kind'.
    """
    assert kind in ("year", "month", "day", "hour", "minute", "second"), \
            "'kind' must be one of 'year', 'month', 'day', 'hour', 'minute' or 'second'."
    assert order in ('ASC', 'DESC'), \
            "'order' must be either 'ASC' or 'DESC'."
    if settings.USE_TZ:
        if tzinfo is None:
            tzinfo = timezone.get_current_timezone()
    else:
        tzinfo = None
    return self._clone(klass=DateTimeQuerySet, setup=True,
            _field_name=field_name, _kind=kind, _order=order, _tzinfo=tzinfo)

def defer(

self, *fields)

Defers the loading of data for certain fields until they are accessed. The set of fields to defer is added to any existing set of deferred fields. The only exception to this is if None is passed in as the only parameter, in which case all deferrals are removed (None acts as a reset option).

def defer(self, *fields):
    """
    Defers the loading of data for certain fields until they are accessed.
    The set of fields to defer is added to any existing set of deferred
    fields. The only exception to this is if None is passed in as the only
    parameter, in which case all deferrals are removed (None acts as a
    reset option).
    """
    clone = self._clone()
    if fields == (None,):
        clone.query.clear_deferred_loading()
    else:
        clone.query.add_deferred_loading(fields)
    return clone

def delete(

self)

def delete(self):
    CHUNK=1024
    objects = self.model._meta.get_all_related_objects()
    ii = self.count()
    values = self.values_list('pk')
    for related in objects:
        i = 0
        while i < ii:
            ids = [v[0] for v in values[i:i + CHUNK]]
            filter = {related.field.name + '__pk__in': ids}
            q = related.model.objects.filter(**filter)
            if isinstance(related.field, WeakForeignKey):
                update = {related.field.name: None}
                q.update(**update)
            else:
                q.delete()
            i += CHUNK
    super(EnhancedQuerySet, self).delete()

def distinct(

self, *field_names)

Returns a new QuerySet instance that will select only distinct results.

def distinct(self, *field_names):
    """
    Returns a new QuerySet instance that will select only distinct results.
    """
    assert self.query.can_filter(), \
            "Cannot create distinct fields once a slice has been taken."
    obj = self._clone()
    obj.query.add_distinct_fields(*field_names)
    return obj

def earliest(

self, field_name=None)

def earliest(self, field_name=None):
    return self._earliest_or_latest(field_name=field_name, direction="")

def exclude(

self, *args, **kwargs)

Returns a new QuerySet instance with NOT (args) ANDed to the existing set.

def exclude(self, *args, **kwargs):
    """
    Returns a new QuerySet instance with NOT (args) ANDed to the existing
    set.
    """
    return self._filter_or_exclude(True, *args, **kwargs)

def exists(

self)

def exists(self):
    if self._result_cache is None:
        return self.query.has_results(using=self.db)
    return bool(self._result_cache)

def extra(

self, select=None, where=None, params=None, tables=None, order_by=None, select_params=None)

Adds extra SQL fragments to the query.

def extra(self, select=None, where=None, params=None, tables=None,
          order_by=None, select_params=None):
    """
    Adds extra SQL fragments to the query.
    """
    assert self.query.can_filter(), \
            "Cannot change a query once a slice has been taken"
    clone = self._clone()
    clone.query.add_extra(select, select_params, where, params, tables, order_by)
    return clone

def filter(

self, *args, **kwargs)

Returns a new QuerySet instance with the args ANDed to the existing set.

def filter(self, *args, **kwargs):
    """
    Returns a new QuerySet instance with the args ANDed to the existing
    set.
    """
    return self._filter_or_exclude(False, *args, **kwargs)

def first(

self)

Returns the first object of a query, returns None if no match is found.

def first(self):
    """
    Returns the first object of a query, returns None if no match is found.
    """
    qs = self if self.ordered else self.order_by('pk')
    try:
        return qs[0]
    except IndexError:
        return None

def get(

self, *args, **kwargs)

Performs the query and returns a single object matching the given keyword arguments.

def get(self, *args, **kwargs):
    """
    Performs the query and returns a single object matching the given
    keyword arguments.
    """
    clone = self.filter(*args, **kwargs)
    if self.query.can_filter():
        clone = clone.order_by()
    num = len(clone)
    if num == 1:
        return clone._result_cache[0]
    if not num:
        raise self.model.DoesNotExist(
            "%s matching query does not exist." %
            self.model._meta.object_name)
    raise self.model.MultipleObjectsReturned(
        "get() returned more than one %s -- it returned %s!" %
        (self.model._meta.object_name, num))

def get_or_create(

self, **kwargs)

Looks up an object with the given kwargs, creating one if necessary. Returns a tuple of (object, created), where created is a boolean specifying whether an object was created.

def get_or_create(self, **kwargs):
    """
    Looks up an object with the given kwargs, creating one if necessary.
    Returns a tuple of (object, created), where created is a boolean
    specifying whether an object was created.
    """
    defaults = kwargs.pop('defaults', {})
    lookup = kwargs.copy()
    for f in self.model._meta.fields:
        if f.attname in lookup:
            lookup[f.name] = lookup.pop(f.attname)
    try:
        self._for_write = True
        return self.get(**lookup), False
    except self.model.DoesNotExist:
        try:
            params = dict((k, v) for k, v in kwargs.items() if LOOKUP_SEP not in k)
            params.update(defaults)
            obj = self.model(**params)
            with transaction.atomic(using=self.db):
                obj.save(force_insert=True, using=self.db)
            return obj, True
        except DatabaseError:
            exc_info = sys.exc_info()
            try:
                return self.get(**lookup), False
            except self.model.DoesNotExist:
                # Re-raise the DatabaseError with its original traceback.
                six.reraise(*exc_info)

def in_bulk(

self, id_list)

Returns a dictionary mapping each of the given IDs to the object with that ID.

def in_bulk(self, id_list):
    """
    Returns a dictionary mapping each of the given IDs to the object with
    that ID.
    """
    assert self.query.can_filter(), \
            "Cannot use 'limit' or 'offset' with in_bulk"
    if not id_list:
        return {}
    qs = self.filter(pk__in=id_list).order_by()
    return dict([(obj._get_pk_val(), obj) for obj in qs])

def iterator(

self)

An iterator over the results from applying this QuerySet to the database.

def iterator(self):
    """
    An iterator over the results from applying this QuerySet to the
    database.
    """
    fill_cache = False
    if connections[self.db].features.supports_select_related:
        fill_cache = self.query.select_related
    if isinstance(fill_cache, dict):
        requested = fill_cache
    else:
        requested = None
    max_depth = self.query.max_depth
    extra_select = list(self.query.extra_select)
    aggregate_select = list(self.query.aggregate_select)
    only_load = self.query.get_loaded_field_names()
    if not fill_cache:
        fields = self.model._meta.concrete_fields
    load_fields = []
    # If only/defer clauses have been specified,
    # build the list of fields that are to be loaded.
    if only_load:
        for field, model in self.model._meta.get_concrete_fields_with_model():
            if model is None:
                model = self.model
            try:
                if field.name in only_load[model]:
                    # Add a field that has been explicitly included
                    load_fields.append(field.name)
            except KeyError:
                # Model wasn't explicitly listed in the only_load table
                # Therefore, we need to load all fields from this model
                load_fields.append(field.name)
    index_start = len(extra_select)
    aggregate_start = index_start + len(load_fields or self.model._meta.concrete_fields)
    skip = None
    if load_fields and not fill_cache:
        # Some fields have been deferred, so we have to initialise
        # via keyword arguments.
        skip = set()
        init_list = []
        for field in fields:
            if field.name not in load_fields:
                skip.add(field.attname)
            else:
                init_list.append(field.attname)
        model_cls = deferred_class_factory(self.model, skip)
    # Cache db and model outside the loop
    db = self.db
    model = self.model
    compiler = self.query.get_compiler(using=db)
    if fill_cache:
        klass_info = get_klass_info(model, max_depth=max_depth,
                                    requested=requested, only_load=only_load)
    for row in compiler.results_iter():
        if fill_cache:
            obj, _ = get_cached_row(row, index_start, db, klass_info,
                                    offset=len(aggregate_select))
        else:
            # Omit aggregates in object creation.
            row_data = row[index_start:aggregate_start]
            if skip:
                obj = model_cls(**dict(zip(init_list, row_data)))
            else:
                obj = model(*row_data)
            # Store the source database of the object
            obj._state.db = db
            # This object came from the database; it's not being added.
            obj._state.adding = False
        if extra_select:
            for i, k in enumerate(extra_select):
                setattr(obj, k, row[i])
        # Add the aggregates to the model
        if aggregate_select:
            for i, aggregate in enumerate(aggregate_select):
                setattr(obj, aggregate, row[i + aggregate_start])
        # Add the known related objects to the model, if there are any
        if self._known_related_objects:
            for field, rel_objs in self._known_related_objects.items():
                # Avoid overwriting objects loaded e.g. by select_related
                if hasattr(obj, field.get_cache_name()):
                    continue
                pk = getattr(obj, field.get_attname())
                try:
                    rel_obj = rel_objs[pk]
                except KeyError:
                    pass               # may happen in qs1 | qs2 scenarios
                else:
                    setattr(obj, field.name, rel_obj)
        yield obj

def last(

self)

Returns the last object of a query, returns None if no match is found.

def last(self):
    """
    Returns the last object of a query, returns None if no match is found.
    """
    qs = self.reverse() if self.ordered else self.order_by('-pk')
    try:
        return qs[0]
    except IndexError:
        return None

def latest(

self, field_name=None)

def latest(self, field_name=None):
    return self._earliest_or_latest(field_name=field_name, direction="-")

def none(

self)

Return an empty result set

def none(self): # redundant with none() in recent Django svn
    "Return an empty result set"
    return self.extra(where = ["0 = 1"])

def only(

self, *fields)

Essentially, the opposite of defer. Only the fields passed into this method and that are not already specified as deferred are loaded immediately when the queryset is evaluated.

def only(self, *fields):
    """
    Essentially, the opposite of defer. Only the fields passed into this
    method and that are not already specified as deferred are loaded
    immediately when the queryset is evaluated.
    """
    if fields == (None,):
        # Can only pass None to defer(), not only(), as the rest option.
        # That won't stop people trying to do this, so let's be explicit.
        raise TypeError("Cannot pass None as an argument to only().")
    clone = self._clone()
    clone.query.add_immediate_loading(fields)
    return clone

def order_by(

self, *field_names)

Returns a new QuerySet instance with the ordering changed.

def order_by(self, *field_names):
    """
    Returns a new QuerySet instance with the ordering changed.
    """
    assert self.query.can_filter(), \
            "Cannot reorder a query once a slice has been taken."
    obj = self._clone()
    obj.query.clear_ordering(force_empty=False)
    obj.query.add_ordering(*field_names)
    return obj

Returns a new QuerySet instance that will prefetch the specified Many-To-One and Many-To-Many related objects when the QuerySet is evaluated.

When prefetch_related() is called more than once, the list of lookups to prefetch is appended to. If prefetch_related(None) is called, the list is cleared.

def publishing_year_range(

self)

def publishing_year_range(self):
    year_max = self.aggregate(Max('year_published'))['year_published__max']
    year_min = self.filter(year_published__gt=0).aggregate(Min('year_published'))['year_published__min']
    return year_min, year_max

quick_search(self, pattern) — Perform a quick search on code, title and collector name

def recording_year_range(

self)

def recording_year_range(self):
    from_max = self.aggregate(Max('recorded_from_year'))['recorded_from_year__max']
    to_max   = self.aggregate(Max('recorded_to_year'))['recorded_to_year__max']
    year_max = max(from_max, to_max)
    from_min = self.filter(recorded_from_year__gt=0).aggregate(Min('recorded_from_year'))['recorded_from_year__min']
    to_min   = self.filter(recorded_to_year__gt=0).aggregate(Min('recorded_to_year'))['recorded_to_year__min']
    year_min = min(from_min, to_min)
    if not year_max:
        year_max = year_min
    elif not year_min:
        year_min = year_max
    return year_min, year_max

def reverse(

self)

Reverses the ordering of the QuerySet.

def reverse(self):
    """
    Reverses the ordering of the QuerySet.
    """
    clone = self._clone()
    clone.query.standard_ordering = not clone.query.standard_ordering
    return clone

def select_for_update(

self, **kwargs)

Returns a new QuerySet instance that will select objects with a FOR UPDATE lock.

def select_for_update(self, **kwargs):
    """
    Returns a new QuerySet instance that will select objects with a
    FOR UPDATE lock.
    """
    # Default to false for nowait
    nowait = kwargs.pop('nowait', False)
    obj = self._clone()
    obj._for_write = True
    obj.query.select_for_update = True
    obj.query.select_for_update_nowait = nowait
    return obj

Returns a new QuerySet instance that will select related objects.

If fields are specified, they must be ForeignKey fields and only those related objects are included in the selection.

If select_related(None) is called, the list is cleared.

def sound(

self)

def sound(self):
    return self.filter(Q(items__file__contains='/') | Q(items__url__contains='/')).distinct()

def update(

self, **kwargs)

Updates all elements in the current QuerySet, setting all the given fields to the appropriate values.

def update(self, **kwargs):
    """
    Updates all elements in the current QuerySet, setting all the given
    fields to the appropriate values.
    """
    assert self.query.can_filter(), \
            "Cannot update a query once a slice has been taken."
    self._for_write = True
    query = self.query.clone(sql.UpdateQuery)
    query.add_update_values(kwargs)
    with transaction.commit_on_success_unless_managed(using=self.db):
        rows = query.get_compiler(self.db).execute_sql(None)
    self._result_cache = None
    return rows

def using(

self, alias)

Selects which database this QuerySet should execute its query against.

def using(self, alias):
    """
    Selects which database this QuerySet should excecute its query against.
    """
    clone = self._clone()
    clone._db = alias
    return clone

def values(

self, *fields)

def values(self, *fields):
    return self._clone(klass=ValuesQuerySet, setup=True, _fields=fields)

def values_list(

self, *fields, **kwargs)

def values_list(self, *fields, **kwargs):
    flat = kwargs.pop('flat', False)
    if kwargs:
        raise TypeError('Unexpected keyword arguments to values_list: %s'
                % (list(kwargs),))
    if flat and len(fields) > 1:
        raise TypeError("'flat' is not valid when values_list is called with more than one field.")
    return self._clone(klass=ValuesListQuerySet, setup=True, flat=flat,
            _fields=fields)

def virtual(

self, *args)

def virtual(self, *args):
    qs = self
    for f in args:
        if f == 'apparent_collector':
            if not 'sqlite3' in engine and not 'postgresql_psycopg2' in engine:
                qs = qs.extra(select={f: 'IF(media_collections.collector_is_creator, '
                                     'media_collections.creator, media_collections.collector)'})
        else:
            raise Exception("Unsupported virtual field: %s" % f)
    return qs

class MediaCorpusManager

Manage media resource queries

class MediaCorpusManager(CoreManager):
    "Manage media resource queries"

    def get_query_set(self):
        "Return resource query sets"
        return MediaCorpusQuerySet(self.model)

    def quick_search(self, *args, **kwargs):
        return self.get_query_set().quick_search(*args, **kwargs)
    quick_search.__doc__ = MediaCorpusQuerySet.quick_search.__doc__

Ancestors (in MRO)

  • MediaCorpusManager
  • telemeta.models.core.CoreManager
  • telemeta.models.core.EnhancedManager
  • django.db.models.manager.Manager
  • __builtin__.object

Class variables

var creation_counter

Instance variables

var db

Methods

def __init__(

self)

def __init__(self):
    super(Manager, self).__init__()
    self._set_creation_counter()
    self.model = None
    self._inherited = False
    self._db = None

def aggregate(

self, *args, **kwargs)

def aggregate(self, *args, **kwargs):
    return self.get_queryset().aggregate(*args, **kwargs)

def all(

self)

def all(self):
    return self.get_queryset()

def annotate(

self, *args, **kwargs)

def annotate(self, *args, **kwargs):
    return self.get_queryset().annotate(*args, **kwargs)

def bulk_create(

self, *args, **kwargs)

def bulk_create(self, *args, **kwargs):
    return self.get_queryset().bulk_create(*args, **kwargs)

def complex_filter(

self, *args, **kwargs)

def complex_filter(self, *args, **kwargs):
    return self.get_queryset().complex_filter(*args, **kwargs)

def contribute_to_class(

self, model, name)

def contribute_to_class(self, model, name):
    # TODO: Use weakref because of possible memory leak / circular reference.
    self.model = model
    # Only contribute the manager if the model is concrete
    if model._meta.abstract:
        setattr(model, name, AbstractManagerDescriptor(model))
    elif model._meta.swapped:
        setattr(model, name, SwappedManagerDescriptor(model))
    else:
    # if not model._meta.abstract and not model._meta.swapped:
        setattr(model, name, ManagerDescriptor(self))
    if not getattr(model, '_default_manager', None) or self.creation_counter < model._default_manager.creation_counter:
        model._default_manager = self
    if model._meta.abstract or (self._inherited and not self.model._meta.proxy):
        model._meta.abstract_managers.append((self.creation_counter, name,
                self))
    else:
        model._meta.concrete_managers.append((self.creation_counter, name,
            self))

def count(

self)

def count(self):
    return self.get_queryset().count()

def create(

self, **kwargs)

def create(self, **kwargs):
    return self.get_queryset().create(**kwargs)

def dates(

self, *args, **kwargs)

def dates(self, *args, **kwargs):
    return self.get_queryset().dates(*args, **kwargs)

def datetimes(

self, *args, **kwargs)

def datetimes(self, *args, **kwargs):
    return self.get_queryset().datetimes(*args, **kwargs)

def db_manager(

self, using)

def db_manager(self, using):
    obj = copy.copy(self)
    obj._db = using
    return obj

def defer(

self, *args, **kwargs)

def defer(self, *args, **kwargs):
    return self.get_queryset().defer(*args, **kwargs)

def distinct(

self, *args, **kwargs)

def distinct(self, *args, **kwargs):
    return self.get_queryset().distinct(*args, **kwargs)

def earliest(

self, *args, **kwargs)

def earliest(self, *args, **kwargs):
    return self.get_queryset().earliest(*args, **kwargs)

def exclude(

self, *args, **kwargs)

def exclude(self, *args, **kwargs):
    return self.get_queryset().exclude(*args, **kwargs)

def exists(

self, *args, **kwargs)

def exists(self, *args, **kwargs):
    return self.get_queryset().exists(*args, **kwargs)

def extra(

self, *args, **kwargs)

def extra(self, *args, **kwargs):
    return self.get_queryset().extra(*args, **kwargs)

def filter(

self, *args, **kwargs)

def filter(self, *args, **kwargs):
    return self.get_queryset().filter(*args, **kwargs)

def first(

self)

def first(self):
    return self.get_queryset().first()

def get(

self, **kwargs)

def get(self, **kwargs):
    if kwargs.has_key('public_id'):
        try:
            args = kwargs.copy()
            args['code'] = kwargs['public_id']
            args.pop('public_id')
            return super(CoreManager, self).get(**args)
        except ObjectDoesNotExist:
            args = kwargs.copy()
            args['id'] = kwargs['public_id']
            args.pop('public_id')
            return super(CoreManager, self).get(**args)
    return super(CoreManager, self).get(**kwargs)

def get_or_create(

self, **kwargs)

def get_or_create(self, **kwargs):
    return self.get_queryset().get_or_create(**kwargs)

def get_query_set(

*args, **kwargs)

def wrapped(*args, **kwargs):
    warnings.warn(
        "`%s.%s` is deprecated, use `%s` instead." %
        (self.class_name, self.old_method_name, self.new_method_name),
        self.deprecation_warning, 2)
    return f(*args, **kwargs)

def get_queryset(

self)

Return resource query sets

def get_query_set(self):
    "Return resource query sets"
    return MediaCorpusQuerySet(self.model)

def in_bulk(

self, *args, **kwargs)

def in_bulk(self, *args, **kwargs):
    return self.get_queryset().in_bulk(*args, **kwargs)

def iterator(

self, *args, **kwargs)

def iterator(self, *args, **kwargs):
    return self.get_queryset().iterator(*args, **kwargs)

def last(

self)

def last(self):
    return self.get_queryset().last()

def latest(

self, *args, **kwargs)

def latest(self, *args, **kwargs):
    return self.get_queryset().latest(*args, **kwargs)

def none(

self, *args, **kwargs)

def none(self, *args, **kwargs):
    ""
    return self.get_query_set().none(*args, **kwargs)

def only(

self, *args, **kwargs)

def only(self, *args, **kwargs):
    return self.get_queryset().only(*args, **kwargs)

def order_by(

self, *args, **kwargs)

def order_by(self, *args, **kwargs):
    return self.get_queryset().order_by(*args, **kwargs)

Perform a quick search on text and char fields

def raw(

self, raw_query, params=None, *args, **kwargs)

def raw(self, raw_query, params=None, *args, **kwargs):
    return RawQuerySet(raw_query=raw_query, model=self.model, params=params, using=self._db, *args, **kwargs)

def reverse(

self, *args, **kwargs)

def reverse(self, *args, **kwargs):
    return self.get_queryset().reverse(*args, **kwargs)

def select_for_update(

self, *args, **kwargs)

def select_for_update(self, *args, **kwargs):
    return self.get_queryset().select_for_update(*args, **kwargs)

def update(

self, *args, **kwargs)

def update(self, *args, **kwargs):
    return self.get_queryset().update(*args, **kwargs)

def using(

self, *args, **kwargs)

def using(self, *args, **kwargs):
    return self.get_queryset().using(*args, **kwargs)

def values(

self, *args, **kwargs)

def values(self, *args, **kwargs):
    return self.get_queryset().values(*args, **kwargs)

def values_list(

self, *args, **kwargs)

def values_list(self, *args, **kwargs):
    return self.get_queryset().values_list(*args, **kwargs)

class MediaCorpusQuerySet

Base class for all media resource query sets

class MediaCorpusQuerySet(CoreQuerySet):
    "Base class for all media resource query sets"

    def quick_search(self, pattern):
        "Perform a quick search on text and char fields"
        from telemeta.models.corpus import MediaCorpus
        mod = MediaCorpus()
        pattern = pattern.strip()
        q = Q(code__contains=pattern)
        fields = mod.to_dict()
        keys =  fields.keys()

        for field in keys:
            field_str = str(mod._meta.get_field(field))
            if 'CharField' in field_str or 'TextField' in field_str:
                q = q | word_search_q(field, pattern)

        return self.filter(q)

Ancestors (in MRO)

  • MediaCorpusQuerySet
  • telemeta.models.core.CoreQuerySet
  • telemeta.models.core.EnhancedQuerySet
  • django.db.models.query.QuerySet
  • __builtin__.object

Class variables

var value_annotation

Instance variables

var db

Return the database that will be used if this query is executed now

var ordered

Returns True if the QuerySet is ordered -- i.e. has an order_by() clause or a default ordering on the model.

Methods

def __init__(

self, model=None, query=None, using=None)

def __init__(self, model=None, query=None, using=None):
    self.model = model
    self._db = using
    self.query = query or sql.Query(self.model)
    self._result_cache = None
    self._sticky_filter = False
    self._for_write = False
    self._prefetch_related_lookups = []
    self._prefetch_done = False
    self._known_related_objects = {}        # {rel_field, {pk: rel_obj}}

def aggregate(

self, *args, **kwargs)

Returns a dictionary containing the calculations (aggregation) over the current queryset

If args is present the expression is passed as a kwarg using the Aggregate object's default alias.

def aggregate(self, *args, **kwargs):
    """
    Returns a dictionary containing the calculations (aggregation)
    over the current queryset
    If args is present the expression is passed as a kwarg using
    the Aggregate object's default alias.
    """
    if self.query.distinct_fields:
        raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
    for arg in args:
        kwargs[arg.default_alias] = arg
    query = self.query.clone()
    for (alias, aggregate_expr) in kwargs.items():
        query.add_aggregate(aggregate_expr, self.model, alias,
            is_summary=True)
    return query.get_aggregation(using=self.db)

def all(

self)

Returns a new QuerySet that is a copy of the current one. This allows a QuerySet to proxy for a model manager in some cases.

def all(self):
    """
    Returns a new QuerySet that is a copy of the current one. This allows a
    QuerySet to proxy for a model manager in some cases.
    """
    return self._clone()

def annotate(

self, *args, **kwargs)

Return a query set in which the returned objects have been annotated with data aggregated from related fields.

def annotate(self, *args, **kwargs):
    """
    Return a query set in which the returned objects have been annotated
    with data aggregated from related fields.
    """
    for arg in args:
        if arg.default_alias in kwargs:
            raise ValueError("The named annotation '%s' conflicts with the "
                             "default name for another annotation."
                             % arg.default_alias)
        kwargs[arg.default_alias] = arg
    names = getattr(self, '_fields', None)
    if names is None:
        names = set(self.model._meta.get_all_field_names())
    for aggregate in kwargs:
        if aggregate in names:
            raise ValueError("The annotation '%s' conflicts with a field on "
                "the model." % aggregate)
    obj = self._clone()
    obj._setup_aggregate_query(list(kwargs))
    # Add the aggregates to the query
    for (alias, aggregate_expr) in kwargs.items():
        obj.query.add_aggregate(aggregate_expr, self.model, alias,
            is_summary=False)
    return obj

def bulk_create(

self, objs, batch_size=None)

Inserts each of the instances into the database. This does not call save() on each of the instances, does not send any pre/post save signals, and does not set the primary key attribute if it is an autoincrement field.

def bulk_create(self, objs, batch_size=None):
    """
    Inserts each of the instances into the database. This does *not* call
    save() on each of the instances, does not send any pre/post save
    signals, and does not set the primary key attribute if it is an
    autoincrement field.
    """
    # So this case is fun. When you bulk insert you don't get the primary
    # keys back (if it's an autoincrement), so you can't insert into the
    # child tables which references this. There are two workarounds, 1)
    # this could be implemented if you didn't have an autoincrement pk,
    # and 2) you could do it by doing O(n) normal inserts into the parent
    # tables to get the primary keys back, and then doing a single bulk
    # insert into the childmost table. Some databases might allow doing
    # this by using RETURNING clause for the insert query. We're punting
    # on these for now because they are relatively rare cases.
    assert batch_size is None or batch_size > 0
    if self.model._meta.parents:
        raise ValueError("Can't bulk create an inherited model")
    if not objs:
        return objs
    self._for_write = True
    connection = connections[self.db]
    fields = self.model._meta.local_concrete_fields
    with transaction.commit_on_success_unless_managed(using=self.db):
        if (connection.features.can_combine_inserts_with_and_without_auto_increment_pk
            and self.model._meta.has_auto_field):
            self._batched_insert(objs, fields, batch_size)
        else:
            objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
            if objs_with_pk:
                self._batched_insert(objs_with_pk, fields, batch_size)
            if objs_without_pk:
                fields= [f for f in fields if not isinstance(f, AutoField)]
                self._batched_insert(objs_without_pk, fields, batch_size)
    return objs

def complex_filter(

self, filter_obj)

Returns a new QuerySet instance with filter_obj added to the filters.

filter_obj can be a Q object (or anything with an add_to_query() method) or a dictionary of keyword lookup arguments.

This exists to support framework features such as 'limit_choices_to', and usually it will be more natural to use other methods.

def complex_filter(self, filter_obj):
    """
    Returns a new QuerySet instance with filter_obj added to the filters.
    filter_obj can be a Q object (or anything with an add_to_query()
    method) or a dictionary of keyword lookup arguments.
    This exists to support framework features such as 'limit_choices_to',
    and usually it will be more natural to use other methods.
    """
    if isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query'):
        clone = self._clone()
        clone.query.add_q(filter_obj)
        return clone
    else:
        return self._filter_or_exclude(None, **filter_obj)

def count(

self)

Performs a SELECT COUNT() and returns the number of records as an integer.

If the QuerySet is already fully cached this simply returns the length of the cached results set to avoid multiple SELECT COUNT(*) calls.

def count(self):
    """
    Performs a SELECT COUNT() and returns the number of records as an
    integer.
    If the QuerySet is already fully cached this simply returns the length
    of the cached results set to avoid multiple SELECT COUNT(*) calls.
    """
    if self._result_cache is not None:
        return len(self._result_cache)
    return self.query.get_count(using=self.db)

def create(

self, **kwargs)

Creates a new object with the given kwargs, saving it to the database and returning the created object.

def create(self, **kwargs):
    """
    Creates a new object with the given kwargs, saving it to the database
    and returning the created object.
    """
    obj = self.model(**kwargs)
    self._for_write = True
    obj.save(force_insert=True, using=self.db)
    return obj

def dates(

self, field_name, kind, order='ASC')

Returns a list of date objects representing all available dates for the given field_name, scoped to 'kind'.

def dates(self, field_name, kind, order='ASC'):
    """
    Returns a list of date objects representing all available dates for
    the given field_name, scoped to 'kind'.
    """
    assert kind in ("year", "month", "day"), \
            "'kind' must be one of 'year', 'month' or 'day'."
    assert order in ('ASC', 'DESC'), \
            "'order' must be either 'ASC' or 'DESC'."
    return self._clone(klass=DateQuerySet, setup=True,
            _field_name=field_name, _kind=kind, _order=order)

def datetimes(

self, field_name, kind, order='ASC', tzinfo=None)

Returns a list of datetime objects representing all available datetimes for the given field_name, scoped to 'kind'.

def datetimes(self, field_name, kind, order='ASC', tzinfo=None):
    """
    Returns a list of datetime objects representing all available
    datetimes for the given field_name, scoped to 'kind'.
    """
    assert kind in ("year", "month", "day", "hour", "minute", "second"), \
            "'kind' must be one of 'year', 'month', 'day', 'hour', 'minute' or 'second'."
    assert order in ('ASC', 'DESC'), \
            "'order' must be either 'ASC' or 'DESC'."
    if settings.USE_TZ:
        if tzinfo is None:
            tzinfo = timezone.get_current_timezone()
    else:
        tzinfo = None
    return self._clone(klass=DateTimeQuerySet, setup=True,
            _field_name=field_name, _kind=kind, _order=order, _tzinfo=tzinfo)

def defer(

self, *fields)

Defers the loading of data for certain fields until they are accessed. The set of fields to defer is added to any existing set of deferred fields. The only exception to this is if None is passed in as the only parameter, in which case all deferrals are removed (None acts as a reset option).

def defer(self, *fields):
    """
    Defers the loading of data for certain fields until they are accessed.
    The set of fields to defer is added to any existing set of deferred
    fields. The only exception to this is if None is passed in as the only
    parameter, in which case all deferrals are removed (None acts as a
    reset option).
    """
    clone = self._clone()
    if fields == (None,):
        clone.query.clear_deferred_loading()
    else:
        clone.query.add_deferred_loading(fields)
    return clone

def delete(

self)

def delete(self):
    CHUNK=1024
    objects = self.model._meta.get_all_related_objects()
    ii = self.count()
    values = self.values_list('pk')
    for related in objects:
        i = 0
        while i < ii:
            ids = [v[0] for v in values[i:i + CHUNK]]
            filter = {related.field.name + '__pk__in': ids}
            q = related.model.objects.filter(**filter)
            if isinstance(related.field, WeakForeignKey):
                update = {related.field.name: None}
                q.update(**update)
            else:
                q.delete()
            i += CHUNK
    super(EnhancedQuerySet, self).delete()

def distinct(

self, *field_names)

Returns a new QuerySet instance that will select only distinct results.

def distinct(self, *field_names):
    """
    Returns a new QuerySet instance that will select only distinct results.
    """
    assert self.query.can_filter(), \
            "Cannot create distinct fields once a slice has been taken."
    obj = self._clone()
    obj.query.add_distinct_fields(*field_names)
    return obj

def earliest(

self, field_name=None)

def earliest(self, field_name=None):
    return self._earliest_or_latest(field_name=field_name, direction="")

def exclude(

self, *args, **kwargs)

Returns a new QuerySet instance with NOT (args) ANDed to the existing set.

def exclude(self, *args, **kwargs):
    """
    Returns a new QuerySet instance with NOT (args) ANDed to the existing
    set.
    """
    return self._filter_or_exclude(True, *args, **kwargs)

def exists(

self)

def exists(self):
    if self._result_cache is None:
        return self.query.has_results(using=self.db)
    return bool(self._result_cache)

def extra(

self, select=None, where=None, params=None, tables=None, order_by=None, select_params=None)

Adds extra SQL fragments to the query.

def extra(self, select=None, where=None, params=None, tables=None,
          order_by=None, select_params=None):
    """
    Adds extra SQL fragments to the query.
    """
    assert self.query.can_filter(), \
            "Cannot change a query once a slice has been taken"
    clone = self._clone()
    clone.query.add_extra(select, select_params, where, params, tables, order_by)
    return clone

def filter(

self, *args, **kwargs)

Returns a new QuerySet instance with the args ANDed to the existing set.

def filter(self, *args, **kwargs):
    """
    Returns a new QuerySet instance with the args ANDed to the existing
    set.
    """
    return self._filter_or_exclude(False, *args, **kwargs)

def first(

self)

Returns the first object of a query, returns None if no match is found.

def first(self):
    """
    Returns the first object of a query, returns None if no match is found.
    """
    qs = self if self.ordered else self.order_by('pk')
    try:
        return qs[0]
    except IndexError:
        return None

def get(

self, *args, **kwargs)

Performs the query and returns a single object matching the given keyword arguments.

def get(self, *args, **kwargs):
    """
    Performs the query and returns a single object matching the given
    keyword arguments.
    """
    clone = self.filter(*args, **kwargs)
    if self.query.can_filter():
        clone = clone.order_by()
    num = len(clone)
    if num == 1:
        return clone._result_cache[0]
    if not num:
        raise self.model.DoesNotExist(
            "%s matching query does not exist." %
            self.model._meta.object_name)
    raise self.model.MultipleObjectsReturned(
        "get() returned more than one %s -- it returned %s!" %
        (self.model._meta.object_name, num))

def get_or_create(

self, **kwargs)

Looks up an object with the given kwargs, creating one if necessary. Returns a tuple of (object, created), where created is a boolean specifying whether an object was created.

def get_or_create(self, **kwargs):
    """
    Looks up an object with the given kwargs, creating one if necessary.
    Returns a tuple of (object, created), where created is a boolean
    specifying whether an object was created.
    """
    defaults = kwargs.pop('defaults', {})
    lookup = kwargs.copy()
    for f in self.model._meta.fields:
        if f.attname in lookup:
            lookup[f.name] = lookup.pop(f.attname)
    try:
        self._for_write = True
        return self.get(**lookup), False
    except self.model.DoesNotExist:
        try:
            params = dict((k, v) for k, v in kwargs.items() if LOOKUP_SEP not in k)
            params.update(defaults)
            obj = self.model(**params)
            with transaction.atomic(using=self.db):
                obj.save(force_insert=True, using=self.db)
            return obj, True
        except DatabaseError:
            exc_info = sys.exc_info()
            try:
                return self.get(**lookup), False
            except self.model.DoesNotExist:
                # Re-raise the DatabaseError with its original traceback.
                six.reraise(*exc_info)

def in_bulk(

self, id_list)

Returns a dictionary mapping each of the given IDs to the object with that ID.

def in_bulk(self, id_list):
    """
    Returns a dictionary mapping each of the given IDs to the object with
    that ID.
    """
    assert self.query.can_filter(), \
            "Cannot use 'limit' or 'offset' with in_bulk"
    if not id_list:
        return {}
    qs = self.filter(pk__in=id_list).order_by()
    return dict([(obj._get_pk_val(), obj) for obj in qs])

def iterator(

self)

An iterator over the results from applying this QuerySet to the database.

def iterator(self):
    """
    An iterator over the results from applying this QuerySet to the
    database.
    """
    fill_cache = False
    if connections[self.db].features.supports_select_related:
        fill_cache = self.query.select_related
    if isinstance(fill_cache, dict):
        requested = fill_cache
    else:
        requested = None
    max_depth = self.query.max_depth
    extra_select = list(self.query.extra_select)
    aggregate_select = list(self.query.aggregate_select)
    only_load = self.query.get_loaded_field_names()
    if not fill_cache:
        fields = self.model._meta.concrete_fields
    load_fields = []
    # If only/defer clauses have been specified,
    # build the list of fields that are to be loaded.
    if only_load:
        for field, model in self.model._meta.get_concrete_fields_with_model():
            if model is None:
                model = self.model
            try:
                if field.name in only_load[model]:
                    # Add a field that has been explicitly included
                    load_fields.append(field.name)
            except KeyError:
                # Model wasn't explicitly listed in the only_load table
                # Therefore, we need to load all fields from this model
                load_fields.append(field.name)
    index_start = len(extra_select)
    aggregate_start = index_start + len(load_fields or self.model._meta.concrete_fields)
    skip = None
    if load_fields and not fill_cache:
        # Some fields have been deferred, so we have to initialise
        # via keyword arguments.
        skip = set()
        init_list = []
        for field in fields:
            if field.name not in load_fields:
                skip.add(field.attname)
            else:
                init_list.append(field.attname)
        model_cls = deferred_class_factory(self.model, skip)
    # Cache db and model outside the loop
    db = self.db
    model = self.model
    compiler = self.query.get_compiler(using=db)
    if fill_cache:
        klass_info = get_klass_info(model, max_depth=max_depth,
                                    requested=requested, only_load=only_load)
    for row in compiler.results_iter():
        if fill_cache:
            obj, _ = get_cached_row(row, index_start, db, klass_info,
                                    offset=len(aggregate_select))
        else:
            # Omit aggregates in object creation.
            row_data = row[index_start:aggregate_start]
            if skip:
                obj = model_cls(**dict(zip(init_list, row_data)))
            else:
                obj = model(*row_data)
            # Store the source database of the object
            obj._state.db = db
            # This object came from the database; it's not being added.
            obj._state.adding = False
        if extra_select:
            for i, k in enumerate(extra_select):
                setattr(obj, k, row[i])
        # Add the aggregates to the model
        if aggregate_select:
            for i, aggregate in enumerate(aggregate_select):
                setattr(obj, aggregate, row[i + aggregate_start])
        # Add the known related objects to the model, if there are any
        if self._known_related_objects:
            for field, rel_objs in self._known_related_objects.items():
                # Avoid overwriting objects loaded e.g. by select_related
                if hasattr(obj, field.get_cache_name()):
                    continue
                pk = getattr(obj, field.get_attname())
                try:
                    rel_obj = rel_objs[pk]
                except KeyError:
                    pass               # may happen in qs1 | qs2 scenarios
                else:
                    setattr(obj, field.name, rel_obj)
        yield obj

def last(

self)

Returns the last object of a query, returns None if no match is found.

def last(self):
    """
    Returns the last object of a query, returns None if no match is found.
    """
    qs = self.reverse() if self.ordered else self.order_by('-pk')
    try:
        return qs[0]
    except IndexError:
        return None

def latest(

self, field_name=None)

def latest(self, field_name=None):
    return self._earliest_or_latest(field_name=field_name, direction="-")

def none(

self)

Return an empty result set

def none(self): # redundant with none() in recent Django svn
    "Return an empty result set"
    return self.extra(where = ["0 = 1"])

def only(

self, *fields)

Essentially, the opposite of defer. Only the fields passed into this method and that are not already specified as deferred are loaded immediately when the queryset is evaluated.

def only(self, *fields):
    """
    Essentially, the opposite of defer. Only the fields passed into this
    method and that are not already specified as deferred are loaded
    immediately when the queryset is evaluated.
    """
    if fields == (None,):
        # Can only pass None to defer(), not only(), as the rest option.
        # That won't stop people trying to do this, so let's be explicit.
        raise TypeError("Cannot pass None as an argument to only().")
    clone = self._clone()
    clone.query.add_immediate_loading(fields)
    return clone

def order_by(

self, *field_names)

Returns a new QuerySet instance with the ordering changed.

def order_by(self, *field_names):
    """
    Returns a new QuerySet instance with the ordering changed.
    """
    assert self.query.can_filter(), \
            "Cannot reorder a query once a slice has been taken."
    obj = self._clone()
    obj.query.clear_ordering(force_empty=False)
    obj.query.add_ordering(*field_names)
    return obj

Returns a new QuerySet instance that will prefetch the specified Many-To-One and Many-To-Many related objects when the QuerySet is evaluated.

When prefetch_related() is called more than once, the list of lookups to prefetch is appended to. If prefetch_related(None) is called, the list is cleared.

Perform a quick search on text and char fields

def reverse(

self)

Reverses the ordering of the QuerySet.

def reverse(self):
    """
    Reverses the ordering of the QuerySet.
    """
    clone = self._clone()
    clone.query.standard_ordering = not clone.query.standard_ordering
    return clone

def select_for_update(

self, **kwargs)

Returns a new QuerySet instance that will select objects with a FOR UPDATE lock.

def select_for_update(self, **kwargs):
    """
    Returns a new QuerySet instance that will select objects with a
    FOR UPDATE lock.
    """
    # Default to false for nowait
    nowait = kwargs.pop('nowait', False)
    obj = self._clone()
    obj._for_write = True
    obj.query.select_for_update = True
    obj.query.select_for_update_nowait = nowait
    return obj

Returns a new QuerySet instance that will select related objects.

If fields are specified, they must be ForeignKey fields and only those related objects are included in the selection.

If select_related(None) is called, the list is cleared.

def update(

self, **kwargs)

Updates all elements in the current QuerySet, setting all the given fields to the appropriate values.

def update(self, **kwargs):
    """
    Updates all elements in the current QuerySet, setting all the given
    fields to the appropriate values.
    """
    assert self.query.can_filter(), \
            "Cannot update a query once a slice has been taken."
    self._for_write = True
    query = self.query.clone(sql.UpdateQuery)
    query.add_update_values(kwargs)
    with transaction.commit_on_success_unless_managed(using=self.db):
        rows = query.get_compiler(self.db).execute_sql(None)
    self._result_cache = None
    return rows

def using(

self, alias)

Selects which database this QuerySet should execute its query against.

def using(self, alias):
    """
    Selects which database this QuerySet should execute its query against.
    """
    clone = self._clone()
    clone._db = alias
    return clone

def values(

self, *fields)

def values(self, *fields):
    return self._clone(klass=ValuesQuerySet, setup=True, _fields=fields)

def values_list(

self, *fields, **kwargs)

def values_list(self, *fields, **kwargs):
    """
    Return a ValuesListQuerySet over the given fields.

    The only recognised keyword is 'flat'; it is valid only with a
    single field and makes the results plain values instead of tuples.
    """
    flat = kwargs.pop('flat', False)
    if len(kwargs) > 0:
        raise TypeError('Unexpected keyword arguments to values_list: %s'
                % (list(kwargs),))
    if len(fields) > 1 and flat:
        raise TypeError("'flat' is not valid when values_list is called with more than one field.")
    return self._clone(klass=ValuesListQuerySet, setup=True, flat=flat,
            _fields=fields)

class MediaFondsManager

Manage media resource queries

class MediaFondsManager(CoreManager):
    "Manage media resource queries"

    def get_query_set(self):
        "Return resource query sets"
        queryset = MediaFondsQuerySet(self.model)
        return queryset

    def quick_search(self, *args, **kwargs):
        # Delegate to the queryset implementation.
        return self.get_query_set().quick_search(*args, **kwargs)
    quick_search.__doc__ = MediaFondsQuerySet.quick_search.__doc__

Ancestors (in MRO)

  • MediaFondsManager
  • telemeta.models.core.CoreManager
  • telemeta.models.core.EnhancedManager
  • django.db.models.manager.Manager
  • __builtin__.object

Class variables

var creation_counter

Instance variables

var db

Methods

def __init__(

self)

def __init__(self):
    super(Manager, self).__init__()
    self._set_creation_counter()
    self.model = None
    self._inherited = False
    self._db = None

def aggregate(

self, *args, **kwargs)

def aggregate(self, *args, **kwargs):
    return self.get_queryset().aggregate(*args, **kwargs)

def all(

self)

def all(self):
    return self.get_queryset()

def annotate(

self, *args, **kwargs)

def annotate(self, *args, **kwargs):
    return self.get_queryset().annotate(*args, **kwargs)

def bulk_create(

self, *args, **kwargs)

def bulk_create(self, *args, **kwargs):
    return self.get_queryset().bulk_create(*args, **kwargs)

def complex_filter(

self, *args, **kwargs)

def complex_filter(self, *args, **kwargs):
    return self.get_queryset().complex_filter(*args, **kwargs)

def contribute_to_class(

self, model, name)

def contribute_to_class(self, model, name):
    # TODO: Use weakref because of possible memory leak / circular reference.
    self.model = model
    # Only contribute the manager if the model is concrete
    if model._meta.abstract:
        setattr(model, name, AbstractManagerDescriptor(model))
    elif model._meta.swapped:
        setattr(model, name, SwappedManagerDescriptor(model))
    else:
    # if not model._meta.abstract and not model._meta.swapped:
        setattr(model, name, ManagerDescriptor(self))
    if not getattr(model, '_default_manager', None) or self.creation_counter < model._default_manager.creation_counter:
        model._default_manager = self
    if model._meta.abstract or (self._inherited and not self.model._meta.proxy):
        model._meta.abstract_managers.append((self.creation_counter, name,
                self))
    else:
        model._meta.concrete_managers.append((self.creation_counter, name,
            self))

def count(

self)

def count(self):
    return self.get_queryset().count()

def create(

self, **kwargs)

def create(self, **kwargs):
    return self.get_queryset().create(**kwargs)

def dates(

self, *args, **kwargs)

def dates(self, *args, **kwargs):
    return self.get_queryset().dates(*args, **kwargs)

def datetimes(

self, *args, **kwargs)

def datetimes(self, *args, **kwargs):
    return self.get_queryset().datetimes(*args, **kwargs)

def db_manager(

self, using)

def db_manager(self, using):
    obj = copy.copy(self)
    obj._db = using
    return obj

def defer(

self, *args, **kwargs)

def defer(self, *args, **kwargs):
    return self.get_queryset().defer(*args, **kwargs)

def distinct(

self, *args, **kwargs)

def distinct(self, *args, **kwargs):
    return self.get_queryset().distinct(*args, **kwargs)

def earliest(

self, *args, **kwargs)

def earliest(self, *args, **kwargs):
    return self.get_queryset().earliest(*args, **kwargs)

def exclude(

self, *args, **kwargs)

def exclude(self, *args, **kwargs):
    return self.get_queryset().exclude(*args, **kwargs)

def exists(

self, *args, **kwargs)

def exists(self, *args, **kwargs):
    return self.get_queryset().exists(*args, **kwargs)

def extra(

self, *args, **kwargs)

def extra(self, *args, **kwargs):
    return self.get_queryset().extra(*args, **kwargs)

def filter(

self, *args, **kwargs)

def filter(self, *args, **kwargs):
    return self.get_queryset().filter(*args, **kwargs)

def first(

self)

def first(self):
    return self.get_queryset().first()

def get(

self, **kwargs)

def get(self, **kwargs):
    """
    Fetch a single object, treating 'public_id' as an alias that is
    first resolved against the 'code' field, then against the primary
    key ('id') if no object matches by code.

    Raises ObjectDoesNotExist / MultipleObjectsReturned as usual.
    """
    # 'in' works on both Python 2 and 3; dict.has_key() was removed in
    # Python 3 and is deprecated Python 2 style.
    if 'public_id' in kwargs:
        try:
            args = kwargs.copy()
            args['code'] = args.pop('public_id')
            return super(CoreManager, self).get(**args)
        except ObjectDoesNotExist:
            # Fall back to the numeric primary key.
            args = kwargs.copy()
            args['id'] = args.pop('public_id')
            return super(CoreManager, self).get(**args)
    return super(CoreManager, self).get(**kwargs)

def get_or_create(

self, **kwargs)

def get_or_create(self, **kwargs):
    return self.get_queryset().get_or_create(**kwargs)

def get_query_set(

*args, **kwargs)

def wrapped(*args, **kwargs):
    warnings.warn(
        "`%s.%s` is deprecated, use `%s` instead." %
        (self.class_name, self.old_method_name, self.new_method_name),
        self.deprecation_warning, 2)
    return f(*args, **kwargs)

def get_queryset(

self)

Return resource query sets

def get_query_set(self):
    "Return resource query sets"
    return MediaFondsQuerySet(self.model)

def in_bulk(

self, *args, **kwargs)

def in_bulk(self, *args, **kwargs):
    return self.get_queryset().in_bulk(*args, **kwargs)

def iterator(

self, *args, **kwargs)

def iterator(self, *args, **kwargs):
    return self.get_queryset().iterator(*args, **kwargs)

def last(

self)

def last(self):
    return self.get_queryset().last()

def latest(

self, *args, **kwargs)

def latest(self, *args, **kwargs):
    return self.get_queryset().latest(*args, **kwargs)

def none(

self, *args, **kwargs)

def none(self, *args, **kwargs):
    "Return an empty query set"
    # Delegate through get_queryset() like the other manager methods;
    # get_query_set() is the deprecated pre-Django-1.6 spelling.
    return self.get_queryset().none(*args, **kwargs)

def only(

self, *args, **kwargs)

def only(self, *args, **kwargs):
    return self.get_queryset().only(*args, **kwargs)

def order_by(

self, *args, **kwargs)

def order_by(self, *args, **kwargs):
    return self.get_queryset().order_by(*args, **kwargs)

Perform a quick search on text and char fields

def raw(

self, raw_query, params=None, *args, **kwargs)

def raw(self, raw_query, params=None, *args, **kwargs):
    return RawQuerySet(raw_query=raw_query, model=self.model, params=params, using=self._db, *args, **kwargs)

def reverse(

self, *args, **kwargs)

def reverse(self, *args, **kwargs):
    return self.get_queryset().reverse(*args, **kwargs)

def select_for_update(

self, *args, **kwargs)

def select_for_update(self, *args, **kwargs):
    return self.get_queryset().select_for_update(*args, **kwargs)

def update(

self, *args, **kwargs)

def update(self, *args, **kwargs):
    return self.get_queryset().update(*args, **kwargs)

def using(

self, *args, **kwargs)

def using(self, *args, **kwargs):
    return self.get_queryset().using(*args, **kwargs)

def values(

self, *args, **kwargs)

def values(self, *args, **kwargs):
    return self.get_queryset().values(*args, **kwargs)

def values_list(

self, *args, **kwargs)

def values_list(self, *args, **kwargs):
    return self.get_queryset().values_list(*args, **kwargs)

class MediaFondsQuerySet

Base class for all media resource query sets

class MediaFondsQuerySet(CoreQuerySet):
    "Base class for all media resource query sets"

    def quick_search(self, pattern):
        "Perform a quick search on text and char fields"
        from telemeta.models.fonds import MediaFonds
        model_instance = MediaFonds()
        pattern = pattern.strip()
        # Always match against the code, then OR in every textual field.
        query = Q(code__contains=pattern)
        for field_name in model_instance.to_dict().keys():
            field_repr = str(model_instance._meta.get_field(field_name))
            if 'CharField' in field_repr or 'TextField' in field_repr:
                query = query | word_search_q(field_name, pattern)
        return self.filter(query)

Ancestors (in MRO)

  • MediaFondsQuerySet
  • telemeta.models.core.CoreQuerySet
  • telemeta.models.core.EnhancedQuerySet
  • django.db.models.query.QuerySet
  • __builtin__.object

Class variables

var value_annotation

Instance variables

var db

Return the database that will be used if this query is executed now

var ordered

Returns True if the QuerySet is ordered -- i.e. has an order_by() clause or a default ordering on the model.

Methods

def __init__(

self, model=None, query=None, using=None)

def __init__(self, model=None, query=None, using=None):
    self.model = model
    self._db = using
    self.query = query or sql.Query(self.model)
    self._result_cache = None
    self._sticky_filter = False
    self._for_write = False
    self._prefetch_related_lookups = []
    self._prefetch_done = False
    self._known_related_objects = {}        # {rel_field, {pk: rel_obj}}

def aggregate(

self, *args, **kwargs)

Returns a dictionary containing the calculations (aggregation) over the current queryset

If args is present the expression is passed as a kwarg using the Aggregate object's default alias.

def aggregate(self, *args, **kwargs):
    """
    Returns a dictionary containing the calculations (aggregation)
    over the current queryset
    If args is present the expression is passed as a kwarg using
    the Aggregate object's default alias.
    """
    if self.query.distinct_fields:
        raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
    for arg in args:
        kwargs[arg.default_alias] = arg
    query = self.query.clone()
    for (alias, aggregate_expr) in kwargs.items():
        query.add_aggregate(aggregate_expr, self.model, alias,
            is_summary=True)
    return query.get_aggregation(using=self.db)

def all(

self)

Returns a new QuerySet that is a copy of the current one. This allows a QuerySet to proxy for a model manager in some cases.

def all(self):
    """
    Return a fresh copy of this QuerySet so it can stand in for a
    model manager where one is expected.
    """
    fresh_copy = self._clone()
    return fresh_copy

def annotate(

self, *args, **kwargs)

Return a query set in which the returned objects have been annotated with data aggregated from related fields.

def annotate(self, *args, **kwargs):
    """
    Return a query set in which the returned objects have been annotated
    with data aggregated from related fields.
    """
    for arg in args:
        if arg.default_alias in kwargs:
            raise ValueError("The named annotation '%s' conflicts with the "
                             "default name for another annotation."
                             % arg.default_alias)
        kwargs[arg.default_alias] = arg
    names = getattr(self, '_fields', None)
    if names is None:
        names = set(self.model._meta.get_all_field_names())
    for aggregate in kwargs:
        if aggregate in names:
            raise ValueError("The annotation '%s' conflicts with a field on "
                "the model." % aggregate)
    obj = self._clone()
    obj._setup_aggregate_query(list(kwargs))
    # Add the aggregates to the query
    for (alias, aggregate_expr) in kwargs.items():
        obj.query.add_aggregate(aggregate_expr, self.model, alias,
            is_summary=False)
    return obj

def bulk_create(

self, objs, batch_size=None)

Inserts each of the instances into the database. This does not call save() on each of the instances, does not send any pre/post save signals, and does not set the primary key attribute if it is an autoincrement field.

def bulk_create(self, objs, batch_size=None):
    """
    Inserts each of the instances into the database. This does *not* call
    save() on each of the instances, does not send any pre/post save
    signals, and does not set the primary key attribute if it is an
    autoincrement field.
    """
    # So this case is fun. When you bulk insert you don't get the primary
    # keys back (if it's an autoincrement), so you can't insert into the
    # child tables which references this. There are two workarounds, 1)
    # this could be implemented if you didn't have an autoincrement pk,
    # and 2) you could do it by doing O(n) normal inserts into the parent
    # tables to get the primary keys back, and then doing a single bulk
    # insert into the childmost table. Some databases might allow doing
    # this by using RETURNING clause for the insert query. We're punting
    # on these for now because they are relatively rare cases.
    assert batch_size is None or batch_size > 0
    if self.model._meta.parents:
        raise ValueError("Can't bulk create an inherited model")
    if not objs:
        return objs
    self._for_write = True
    connection = connections[self.db]
    fields = self.model._meta.local_concrete_fields
    with transaction.commit_on_success_unless_managed(using=self.db):
        if (connection.features.can_combine_inserts_with_and_without_auto_increment_pk
            and self.model._meta.has_auto_field):
            self._batched_insert(objs, fields, batch_size)
        else:
            objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
            if objs_with_pk:
                self._batched_insert(objs_with_pk, fields, batch_size)
            if objs_without_pk:
                fields= [f for f in fields if not isinstance(f, AutoField)]
                self._batched_insert(objs_without_pk, fields, batch_size)
    return objs

def complex_filter(

self, filter_obj)

Returns a new QuerySet instance with filter_obj added to the filters.

filter_obj can be a Q object (or anything with an add_to_query() method) or a dictionary of keyword lookup arguments.

This exists to support framework features such as 'limit_choices_to', and usually it will be more natural to use other methods.

def complex_filter(self, filter_obj):
    """
    Returns a new QuerySet instance with filter_obj added to the filters.
    filter_obj can be a Q object (or anything with an add_to_query()
    method) or a dictionary of keyword lookup arguments.
    This exists to support framework features such as 'limit_choices_to',
    and usually it will be more natural to use other methods.
    """
    if isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query'):
        clone = self._clone()
        clone.query.add_q(filter_obj)
        return clone
    else:
        return self._filter_or_exclude(None, **filter_obj)

def count(

self)

Performs a SELECT COUNT() and returns the number of records as an integer.

If the QuerySet is already fully cached this simply returns the length of the cached results set to avoid multiple SELECT COUNT(*) calls.

def count(self):
    """
    Return the number of matched records.

    Uses the length of the result cache when it is already populated,
    avoiding an extra SELECT COUNT(*) round trip.
    """
    cached = self._result_cache
    if cached is not None:
        return len(cached)
    return self.query.get_count(using=self.db)

def create(

self, **kwargs)

Creates a new object with the given kwargs, saving it to the database and returning the created object.

def create(self, **kwargs):
    """
    Creates a new object with the given kwargs, saving it to the database
    and returning the created object.
    """
    obj = self.model(**kwargs)
    self._for_write = True
    obj.save(force_insert=True, using=self.db)
    return obj

def dates(

self, field_name, kind, order='ASC')

Returns a list of date objects representing all available dates for the given field_name, scoped to 'kind'.

def dates(self, field_name, kind, order='ASC'):
    """
    Returns a list of date objects representing all available dates for
    the given field_name, scoped to 'kind'.
    """
    assert kind in ("year", "month", "day"), \
            "'kind' must be one of 'year', 'month' or 'day'."
    assert order in ('ASC', 'DESC'), \
            "'order' must be either 'ASC' or 'DESC'."
    return self._clone(klass=DateQuerySet, setup=True,
            _field_name=field_name, _kind=kind, _order=order)

def datetimes(

self, field_name, kind, order='ASC', tzinfo=None)

Returns a list of datetime objects representing all available datetimes for the given field_name, scoped to 'kind'.

def datetimes(self, field_name, kind, order='ASC', tzinfo=None):
    """
    Returns a list of datetime objects representing all available
    datetimes for the given field_name, scoped to 'kind'.
    """
    assert kind in ("year", "month", "day", "hour", "minute", "second"), \
            "'kind' must be one of 'year', 'month', 'day', 'hour', 'minute' or 'second'."
    assert order in ('ASC', 'DESC'), \
            "'order' must be either 'ASC' or 'DESC'."
    if settings.USE_TZ:
        if tzinfo is None:
            tzinfo = timezone.get_current_timezone()
    else:
        tzinfo = None
    return self._clone(klass=DateTimeQuerySet, setup=True,
            _field_name=field_name, _kind=kind, _order=order, _tzinfo=tzinfo)

def defer(

self, *fields)

Defers the loading of data for certain fields until they are accessed. The set of fields to defer is added to any existing set of deferred fields. The only exception to this is if None is passed in as the only parameter, in which case all deferrals are removed (None acts as a reset option).

def defer(self, *fields):
    """
    Defers the loading of data for certain fields until they are accessed.
    The set of fields to defer is added to any existing set of deferred
    fields. The only exception to this is if None is passed in as the only
    parameter, in which case all deferrals are removed (None acts as a
    reset option).
    """
    clone = self._clone()
    if fields == (None,):
        clone.query.clear_deferred_loading()
    else:
        clone.query.add_deferred_loading(fields)
    return clone

def delete(

self)

def delete(self):
    """
    Delete every object in this query set, first detaching or deleting
    related objects in fixed-size chunks to bound memory use.

    Rows that reference the deleted objects through a WeakForeignKey
    are updated to NULL; rows behind any other relation are deleted.
    """
    CHUNK = 1024
    related_objects = self.model._meta.get_all_related_objects()
    total = self.count()
    pks = self.values_list('pk')
    for related in related_objects:
        start = 0
        while start < total:
            # Slice lazily so only CHUNK ids are materialised at a time.
            ids = [v[0] for v in pks[start:start + CHUNK]]
            # 'lookup' (not 'filter') avoids shadowing the builtin filter().
            lookup = {related.field.name + '__pk__in': ids}
            related_qs = related.model.objects.filter(**lookup)
            if isinstance(related.field, WeakForeignKey):
                # Weak references are detached, not cascaded.
                related_qs.update(**{related.field.name: None})
            else:
                related_qs.delete()
            start += CHUNK
    super(EnhancedQuerySet, self).delete()

def distinct(

self, *field_names)

Returns a new QuerySet instance that will select only distinct results.

def distinct(self, *field_names):
    """
    Returns a new QuerySet instance that will select only distinct results.
    """
    assert self.query.can_filter(), \
            "Cannot create distinct fields once a slice has been taken."
    obj = self._clone()
    obj.query.add_distinct_fields(*field_names)
    return obj

def earliest(

self, field_name=None)

def earliest(self, field_name=None):
    return self._earliest_or_latest(field_name=field_name, direction="")

def exclude(

self, *args, **kwargs)

Returns a new QuerySet instance with NOT (args) ANDed to the existing set.

def exclude(self, *args, **kwargs):
    """
    Returns a new QuerySet instance with NOT (args) ANDed to the existing
    set.
    """
    return self._filter_or_exclude(True, *args, **kwargs)

def exists(

self)

def exists(self):
    """Return True if the query matches any rows (cheap when cached)."""
    if self._result_cache is not None:
        return bool(self._result_cache)
    return self.query.has_results(using=self.db)

def extra(

self, select=None, where=None, params=None, tables=None, order_by=None, select_params=None)

Adds extra SQL fragments to the query.

def extra(self, select=None, where=None, params=None, tables=None,
          order_by=None, select_params=None):
    """
    Adds extra SQL fragments to the query.
    """
    assert self.query.can_filter(), \
            "Cannot change a query once a slice has been taken"
    clone = self._clone()
    clone.query.add_extra(select, select_params, where, params, tables, order_by)
    return clone

def filter(

self, *args, **kwargs)

Returns a new QuerySet instance with the args ANDed to the existing set.

def filter(self, *args, **kwargs):
    """
    Returns a new QuerySet instance with the args ANDed to the existing
    set.
    """
    return self._filter_or_exclude(False, *args, **kwargs)

def first(

self)

Returns the first object of a query, returns None if no match is found.

def first(self):
    """Return the first object of the query, or None when empty."""
    queryset = self if self.ordered else self.order_by('pk')
    try:
        result = queryset[0]
    except IndexError:
        return None
    return result

def get(

self, *args, **kwargs)

Performs the query and returns a single object matching the given keyword arguments.

def get(self, *args, **kwargs):
    """
    Performs the query and returns a single object matching the given
    keyword arguments.
    """
    clone = self.filter(*args, **kwargs)
    if self.query.can_filter():
        clone = clone.order_by()
    num = len(clone)
    if num == 1:
        return clone._result_cache[0]
    if not num:
        raise self.model.DoesNotExist(
            "%s matching query does not exist." %
            self.model._meta.object_name)
    raise self.model.MultipleObjectsReturned(
        "get() returned more than one %s -- it returned %s!" %
        (self.model._meta.object_name, num))

def get_or_create(

self, **kwargs)

Looks up an object with the given kwargs, creating one if necessary. Returns a tuple of (object, created), where created is a boolean specifying whether an object was created.

def get_or_create(self, **kwargs):
    """
    Looks up an object with the given kwargs, creating one if necessary.
    Returns a tuple of (object, created), where created is a boolean
    specifying whether an object was created.
    """
    defaults = kwargs.pop('defaults', {})
    lookup = kwargs.copy()
    for f in self.model._meta.fields:
        if f.attname in lookup:
            lookup[f.name] = lookup.pop(f.attname)
    try:
        self._for_write = True
        return self.get(**lookup), False
    except self.model.DoesNotExist:
        try:
            params = dict((k, v) for k, v in kwargs.items() if LOOKUP_SEP not in k)
            params.update(defaults)
            obj = self.model(**params)
            with transaction.atomic(using=self.db):
                obj.save(force_insert=True, using=self.db)
            return obj, True
        except DatabaseError:
            exc_info = sys.exc_info()
            try:
                return self.get(**lookup), False
            except self.model.DoesNotExist:
                # Re-raise the DatabaseError with its original traceback.
                six.reraise(*exc_info)

def in_bulk(

self, id_list)

Returns a dictionary mapping each of the given IDs to the object with that ID.

def in_bulk(self, id_list):
    """
    Map each of the given IDs to the object carrying that ID.

    An empty id_list short-circuits to {} without hitting the database.
    """
    assert self.query.can_filter(), \
            "Cannot use 'limit' or 'offset' with in_bulk"
    if not id_list:
        return {}
    matches = self.filter(pk__in=id_list).order_by()
    return dict((obj._get_pk_val(), obj) for obj in matches)

def iterator(

self)

An iterator over the results from applying this QuerySet to the database.

def iterator(self):
    """
    An iterator over the results from applying this QuerySet to the
    database.
    """
    fill_cache = False
    if connections[self.db].features.supports_select_related:
        fill_cache = self.query.select_related
    if isinstance(fill_cache, dict):
        requested = fill_cache
    else:
        requested = None
    max_depth = self.query.max_depth
    extra_select = list(self.query.extra_select)
    aggregate_select = list(self.query.aggregate_select)
    only_load = self.query.get_loaded_field_names()
    if not fill_cache:
        fields = self.model._meta.concrete_fields
    load_fields = []
    # If only/defer clauses have been specified,
    # build the list of fields that are to be loaded.
    if only_load:
        for field, model in self.model._meta.get_concrete_fields_with_model():
            if model is None:
                model = self.model
            try:
                if field.name in only_load[model]:
                    # Add a field that has been explicitly included
                    load_fields.append(field.name)
            except KeyError:
                # Model wasn't explicitly listed in the only_load table
                # Therefore, we need to load all fields from this model
                load_fields.append(field.name)
    index_start = len(extra_select)
    aggregate_start = index_start + len(load_fields or self.model._meta.concrete_fields)
    skip = None
    if load_fields and not fill_cache:
        # Some fields have been deferred, so we have to initialise
        # via keyword arguments.
        skip = set()
        init_list = []
        for field in fields:
            if field.name not in load_fields:
                skip.add(field.attname)
            else:
                init_list.append(field.attname)
        model_cls = deferred_class_factory(self.model, skip)
    # Cache db and model outside the loop
    db = self.db
    model = self.model
    compiler = self.query.get_compiler(using=db)
    if fill_cache:
        klass_info = get_klass_info(model, max_depth=max_depth,
                                    requested=requested, only_load=only_load)
    for row in compiler.results_iter():
        if fill_cache:
            obj, _ = get_cached_row(row, index_start, db, klass_info,
                                    offset=len(aggregate_select))
        else:
            # Omit aggregates in object creation.
            row_data = row[index_start:aggregate_start]
            if skip:
                obj = model_cls(**dict(zip(init_list, row_data)))
            else:
                obj = model(*row_data)
            # Store the source database of the object
            obj._state.db = db
            # This object came from the database; it's not being added.
            obj._state.adding = False
        if extra_select:
            for i, k in enumerate(extra_select):
                setattr(obj, k, row[i])
        # Add the aggregates to the model
        if aggregate_select:
            for i, aggregate in enumerate(aggregate_select):
                setattr(obj, aggregate, row[i + aggregate_start])
        # Add the known related objects to the model, if there are any
        if self._known_related_objects:
            for field, rel_objs in self._known_related_objects.items():
                # Avoid overwriting objects loaded e.g. by select_related
                if hasattr(obj, field.get_cache_name()):
                    continue
                pk = getattr(obj, field.get_attname())
                try:
                    rel_obj = rel_objs[pk]
                except KeyError:
                    pass               # may happen in qs1 | qs2 scenarios
                else:
                    setattr(obj, field.name, rel_obj)
        yield obj

def last(

self)

Returns the last object of a query, returns None if no match is found.

def last(self):
    """Return the last object of the query, or None when empty."""
    queryset = self.reverse() if self.ordered else self.order_by('-pk')
    try:
        result = queryset[0]
    except IndexError:
        return None
    return result

def latest(

self, field_name=None)

def latest(self, field_name=None):
    return self._earliest_or_latest(field_name=field_name, direction="-")

def none(

self)

Return an empty result set

def none(self): # redundant with none() in recent Django svn
    "Return an empty result set"
    # WHERE 0 = 1 can never match, so the query yields no rows.
    impossible_clause = ["0 = 1"]
    return self.extra(where=impossible_clause)

def only(

self, *fields)

Essentially, the opposite of defer. Only the fields passed into this method and that are not already specified as deferred are loaded immediately when the queryset is evaluated.

def only(self, *fields):
    """
    Essentially, the opposite of defer. Only the fields passed into this
    method and that are not already specified as deferred are loaded
    immediately when the queryset is evaluated.
    """
    if fields == (None,):
        # Can only pass None to defer(), not only(), as the rest option.
        # That won't stop people trying to do this, so let's be explicit.
        raise TypeError("Cannot pass None as an argument to only().")
    clone = self._clone()
    clone.query.add_immediate_loading(fields)
    return clone

def order_by(

self, *field_names)

Returns a new QuerySet instance with the ordering changed.

def order_by(self, *field_names):
    """
    Returns a new QuerySet instance with the ordering changed.
    """
    assert self.query.can_filter(), \
            "Cannot reorder a query once a slice has been taken."
    obj = self._clone()
    obj.query.clear_ordering(force_empty=False)
    obj.query.add_ordering(*field_names)
    return obj

Returns a new QuerySet instance that will prefetch the specified Many-To-One and Many-To-Many related objects when the QuerySet is evaluated.

When prefetch_related() is called more than once, the list of lookups to prefetch is appended to. If prefetch_related(None) is called, the list is cleared.

Perform a quick search on text and char fields

def reverse(

self)

Reverses the ordering of the QuerySet.

def reverse(self):
    """
    Return a clone whose ordering direction is flipped.
    """
    flipped = self._clone()
    flipped.query.standard_ordering = not flipped.query.standard_ordering
    return flipped

def select_for_update(

self, **kwargs)

Returns a new QuerySet instance that will select objects with a FOR UPDATE lock.

def select_for_update(self, **kwargs):
    """
    Return a clone whose SQL will select rows with a FOR UPDATE lock.

    The optional 'nowait' keyword (default False) maps to the
    FOR UPDATE NOWAIT variant.
    """
    nowait = kwargs.pop('nowait', False)
    clone = self._clone()
    clone._for_write = True
    clone.query.select_for_update = True
    clone.query.select_for_update_nowait = nowait
    return clone

Returns a new QuerySet instance that will select related objects.

If fields are specified, they must be ForeignKey fields and only those related objects are included in the selection.

If select_related(None) is called, the list is cleared.

def update(

self, **kwargs)

Updates all elements in the current QuerySet, setting all the given fields to the appropriate values.

def update(self, **kwargs):
    """
    Updates all elements in the current QuerySet, setting all the given
    fields to the appropriate values.
    """
    # A sliced queryset maps to LIMIT/OFFSET, which SQL UPDATE cannot express.
    assert self.query.can_filter(), \
            "Cannot update a query once a slice has been taken."
    self._for_write = True
    # Re-issue the current SELECT query as an UPDATE statement.
    query = self.query.clone(sql.UpdateQuery)
    query.add_update_values(kwargs)
    with transaction.commit_on_success_unless_managed(using=self.db):
        rows = query.get_compiler(self.db).execute_sql(None)
    # The cache no longer reflects the database; force a re-fetch on next use.
    self._result_cache = None
    return rows

def using(

self, alias)

Selects which database this QuerySet should execute its query against.

def using(self, alias):
    """
    Select the database alias this QuerySet will execute against.
    """
    rebound = self._clone()
    rebound._db = alias
    return rebound

def values(

self, *fields)

def values(self, *fields):
    return self._clone(klass=ValuesQuerySet, setup=True, _fields=fields)

def values_list(

self, *fields, **kwargs)

def values_list(self, *fields, **kwargs):
    flat = kwargs.pop('flat', False)
    if kwargs:
        raise TypeError('Unexpected keyword arguments to values_list: %s'
                % (list(kwargs),))
    if flat and len(fields) > 1:
        raise TypeError("'flat' is not valid when values_list is called with more than one field.")
    return self._clone(klass=ValuesListQuerySet, setup=True, flat=flat,
            _fields=fields)

class MediaItemManager

Manage media items queries

class MediaItemManager(CoreManager):
    "Manage media items queries"

    def get_query_set(self):
        "Return media query sets"
        # All manager methods below operate through this custom query set.
        return MediaItemQuerySet(self.model)

    def enriched(self):
        "Query set with additional virtual fields such as apparent_collector and country_or_continent"
        return self.get_query_set().virtual('apparent_collector', 'country_or_continent')

    # The methods below are thin delegations to the corresponding
    # MediaItemQuerySet methods; the __doc__ assignments copy the query
    # set docstrings so the manager API stays self-documenting.
    def quick_search(self, *args, **kwargs):
        return self.get_query_set().quick_search(*args, **kwargs)
    quick_search.__doc__ = MediaItemQuerySet.quick_search.__doc__

    def without_collection(self, *args, **kwargs):
        return self.get_query_set().without_collection(*args, **kwargs)
    without_collection.__doc__ = MediaItemQuerySet.without_collection.__doc__

    def by_recording_date(self, *args, **kwargs):
        return self.get_query_set().by_recording_date(*args, **kwargs)
    by_recording_date.__doc__ = MediaItemQuerySet.by_recording_date.__doc__

    def by_title(self, *args, **kwargs):
        return self.get_query_set().by_title(*args, **kwargs)
    by_title.__doc__ = MediaItemQuerySet.by_title.__doc__

    def by_publish_year(self, *args, **kwargs):
        return self.get_query_set().by_publish_year(*args, **kwargs)
    by_publish_year.__doc__ = MediaItemQuerySet.by_publish_year.__doc__

    def by_change_time(self, *args, **kwargs):
        return self.get_query_set().by_change_time(*args, **kwargs)
    by_change_time.__doc__ = MediaItemQuerySet.by_change_time.__doc__

    def by_location(self, *args, **kwargs):
        return self.get_query_set().by_location(*args, **kwargs)
    by_location.__doc__ = MediaItemQuerySet.by_location.__doc__

    def sound(self, *args, **kwargs):
        return self.get_query_set().sound(*args, **kwargs)
    sound.__doc__ = MediaItemQuerySet.sound.__doc__

    def sound_public(self, *args, **kwargs):
        return self.get_query_set().sound_public(*args, **kwargs)
    sound_public.__doc__ = MediaItemQuerySet.sound_public.__doc__

    def by_instrument(self, *args, **kwargs):
        return self.get_query_set().by_instrument(*args, **kwargs)
    by_instrument.__doc__ = MediaItemQuerySet.by_instrument.__doc__

Ancestors (in MRO)

  • MediaItemManager
  • telemeta.models.core.CoreManager
  • telemeta.models.core.EnhancedManager
  • django.db.models.manager.Manager
  • __builtin__.object

Class variables

var creation_counter

Instance variables

var db

Methods

def __init__(

self)

def __init__(self):
    super(Manager, self).__init__()
    self._set_creation_counter()
    self.model = None
    self._inherited = False
    self._db = None

def aggregate(

self, *args, **kwargs)

def aggregate(self, *args, **kwargs):
    return self.get_queryset().aggregate(*args, **kwargs)

def all(

self)

def all(self):
    return self.get_queryset()

def annotate(

self, *args, **kwargs)

def annotate(self, *args, **kwargs):
    return self.get_queryset().annotate(*args, **kwargs)

def bulk_create(

self, *args, **kwargs)

def bulk_create(self, *args, **kwargs):
    return self.get_queryset().bulk_create(*args, **kwargs)

def by_change_time(

self, *args, **kwargs)

Find items by last change time

def by_change_time(self, *args, **kwargs):
    return self.get_query_set().by_change_time(*args, **kwargs)

def by_instrument(

self, *args, **kwargs)

Find items by instrument

def by_instrument(self, *args, **kwargs):
    return self.get_query_set().by_instrument(*args, **kwargs)

def by_location(

self, *args, **kwargs)

Find items by location

def by_location(self, *args, **kwargs):
    return self.get_query_set().by_location(*args, **kwargs)

def by_publish_year(

self, *args, **kwargs)

Find items by publishing year

def by_publish_year(self, *args, **kwargs):
    return self.get_query_set().by_publish_year(*args, **kwargs)

def by_recording_date(

self, *args, **kwargs)

Find items by recording date

def by_recording_date(self, *args, **kwargs):
    return self.get_query_set().by_recording_date(*args, **kwargs)

def by_title(

self, *args, **kwargs)

Find items by title

def by_title(self, *args, **kwargs):
    return self.get_query_set().by_title(*args, **kwargs)

def complex_filter(

self, *args, **kwargs)

def complex_filter(self, *args, **kwargs):
    return self.get_queryset().complex_filter(*args, **kwargs)

def contribute_to_class(

self, model, name)

def contribute_to_class(self, model, name):
    # TODO: Use weakref because of possible memory leak / circular reference.
    self.model = model
    # Only contribute the manager if the model is concrete
    if model._meta.abstract:
        setattr(model, name, AbstractManagerDescriptor(model))
    elif model._meta.swapped:
        setattr(model, name, SwappedManagerDescriptor(model))
    else:
    # if not model._meta.abstract and not model._meta.swapped:
        setattr(model, name, ManagerDescriptor(self))
    if not getattr(model, '_default_manager', None) or self.creation_counter < model._default_manager.creation_counter:
        model._default_manager = self
    if model._meta.abstract or (self._inherited and not self.model._meta.proxy):
        model._meta.abstract_managers.append((self.creation_counter, name,
                self))
    else:
        model._meta.concrete_managers.append((self.creation_counter, name,
            self))

def count(

self)

def count(self):
    return self.get_queryset().count()

def create(

self, **kwargs)

def create(self, **kwargs):
    return self.get_queryset().create(**kwargs)

def dates(

self, *args, **kwargs)

def dates(self, *args, **kwargs):
    return self.get_queryset().dates(*args, **kwargs)

def datetimes(

self, *args, **kwargs)

def datetimes(self, *args, **kwargs):
    return self.get_queryset().datetimes(*args, **kwargs)

def db_manager(

self, using)

def db_manager(self, using):
    obj = copy.copy(self)
    obj._db = using
    return obj

def defer(

self, *args, **kwargs)

def defer(self, *args, **kwargs):
    return self.get_queryset().defer(*args, **kwargs)

def distinct(

self, *args, **kwargs)

def distinct(self, *args, **kwargs):
    return self.get_queryset().distinct(*args, **kwargs)

def earliest(

self, *args, **kwargs)

def earliest(self, *args, **kwargs):
    return self.get_queryset().earliest(*args, **kwargs)

def enriched(

self)

Query set with additional virtual fields such as apparent_collector and country_or_continent

def enriched(self):
    "Query set with additional virtual fields such as apparent_collector and country_or_continent"
    return self.get_query_set().virtual('apparent_collector', 'country_or_continent')

def exclude(

self, *args, **kwargs)

def exclude(self, *args, **kwargs):
    return self.get_queryset().exclude(*args, **kwargs)

def exists(

self, *args, **kwargs)

def exists(self, *args, **kwargs):
    return self.get_queryset().exists(*args, **kwargs)

def extra(

self, *args, **kwargs)

def extra(self, *args, **kwargs):
    return self.get_queryset().extra(*args, **kwargs)

def filter(

self, *args, **kwargs)

def filter(self, *args, **kwargs):
    return self.get_queryset().filter(*args, **kwargs)

def first(

self)

def first(self):
    return self.get_queryset().first()

def get(

self, **kwargs)

def get(self, **kwargs):
    # Allow lookups by 'public_id': try the human-readable 'code' field
    # first, then fall back to the numeric primary key if no match.
    if kwargs.has_key('public_id'):
        try:
            args = kwargs.copy()
            args['code'] = kwargs['public_id']
            args.pop('public_id')
            return super(CoreManager, self).get(**args)
        except ObjectDoesNotExist:
            # No object with that code: retry treating public_id as a pk.
            args = kwargs.copy()
            args['id'] = kwargs['public_id']
            args.pop('public_id')
            return super(CoreManager, self).get(**args)
    return super(CoreManager, self).get(**kwargs)

def get_or_create(

self, **kwargs)

def get_or_create(self, **kwargs):
    return self.get_queryset().get_or_create(**kwargs)

def get_query_set(

*args, **kwargs)

def wrapped(*args, **kwargs):
    warnings.warn(
        "`%s.%s` is deprecated, use `%s` instead." %
        (self.class_name, self.old_method_name, self.new_method_name),
        self.deprecation_warning, 2)
    return f(*args, **kwargs)

def get_queryset(

self)

Return media query sets

def get_query_set(self):
    "Return media query sets"
    return MediaItemQuerySet(self.model)

def in_bulk(

self, *args, **kwargs)

def in_bulk(self, *args, **kwargs):
    return self.get_queryset().in_bulk(*args, **kwargs)

def iterator(

self, *args, **kwargs)

def iterator(self, *args, **kwargs):
    return self.get_queryset().iterator(*args, **kwargs)

def last(

self)

def last(self):
    return self.get_queryset().last()

def latest(

self, *args, **kwargs)

def latest(self, *args, **kwargs):
    return self.get_queryset().latest(*args, **kwargs)

def none(

self, *args, **kwargs)

def none(self, *args, **kwargs):
    ""
    return self.get_query_set().none(*args, **kwargs)

def only(

self, *args, **kwargs)

def only(self, *args, **kwargs):
    return self.get_queryset().only(*args, **kwargs)

def order_by(

self, *args, **kwargs)

def order_by(self, *args, **kwargs):
    return self.get_queryset().order_by(*args, **kwargs)

Perform a quick search on code, title and collector name

def raw(

self, raw_query, params=None, *args, **kwargs)

def raw(self, raw_query, params=None, *args, **kwargs):
    return RawQuerySet(raw_query=raw_query, model=self.model, params=params, using=self._db, *args, **kwargs)

def reverse(

self, *args, **kwargs)

def reverse(self, *args, **kwargs):
    return self.get_queryset().reverse(*args, **kwargs)

def select_for_update(

self, *args, **kwargs)

def select_for_update(self, *args, **kwargs):
    return self.get_queryset().select_for_update(*args, **kwargs)

def sound(

self, *args, **kwargs)

def sound(self, *args, **kwargs):
    return self.get_query_set().sound(*args, **kwargs)

def sound_public(

self, *args, **kwargs)

def sound_public(self, *args, **kwargs):
    return self.get_query_set().sound_public(*args, **kwargs)

def update(

self, *args, **kwargs)

def update(self, *args, **kwargs):
    return self.get_queryset().update(*args, **kwargs)

def using(

self, *args, **kwargs)

def using(self, *args, **kwargs):
    return self.get_queryset().using(*args, **kwargs)

def values(

self, *args, **kwargs)

def values(self, *args, **kwargs):
    return self.get_queryset().values(*args, **kwargs)

def values_list(

self, *args, **kwargs)

def values_list(self, *args, **kwargs):
    return self.get_queryset().values_list(*args, **kwargs)

def without_collection(

self, *args, **kwargs)

Find items which do not belong to any collection

def without_collection(self, *args, **kwargs):
    return self.get_query_set().without_collection(*args, **kwargs)

class MediaItemQuerySet

Base class for all media item query sets

class MediaItemQuerySet(CoreQuerySet):
    "Base class for all media item query sets"

    def quick_search(self, pattern):
        "Perform a quick search on code, title and collector name"
        # Match the pattern against the identifying codes, the title and
        # comment free-text fields, and any collector-related field.
        q = ( Q(code__contains=pattern) |
            Q(old_code__contains=pattern) |
            word_search_q('title', pattern) |
            word_search_q('comment', pattern) |
            self.by_fuzzy_collector_q(pattern) )

        return self.filter(q)

    def without_collection(self):
        "Find items which do not belong to any collection"
        # Raw subquery: the collection FK may point at a missing row, so a
        # plain isnull filter is not equivalent here.
        return self.extra(
            where = ["collection_id NOT IN (SELECT id FROM media_collections)"])

    def by_public_id(self, public_id):
        "Find items by public_id"
        return self.filter(public_id=public_id)

    def by_recording_date(self, from_date, to_date = None):
        "Find items by recording date"
        if to_date is None:
            # Single date: match items whose recording interval contains it.
            return (self.filter(recorded_from_date__lte=from_date, recorded_to_date__gte=from_date))
        else:
            # Date range: match items whose interval starts or ends within it.
            return (self.filter(Q(recorded_from_date__range=(from_date, to_date))
                                | Q(recorded_to_date__range=(from_date, to_date))))

    def by_title(self, pattern):
        "Find items by title"
        # to (sort of) sync with models.media.MediaItem.get_title():
        # items without a title of their own fall back to the collection title.
        return self.filter(word_search_q("title", pattern) |
                           (Q(title="") & word_search_q("collection__title", pattern)))

    def by_publish_year(self, from_year, to_year = None):
        "Find items by publishing year"
        if to_year is None:
            to_year = from_year
        return self.filter(collection__year_published__range=(from_year, to_year))

    def by_change_time(self, from_time = None, until_time = None):
        "Find items by last change time"
        return self._by_change_time('item', from_time, until_time)

    def by_location(self, location):
        "Find items by location"
        # apparented() presumably expands to related locations — TODO confirm.
        return self.filter(location__in=location.apparented())

    @staticmethod
    def __name_cmp(obj1, obj2):
        # Accent- and case-insensitive comparator used to sort locations.
        return unaccent_icmp(obj1.name, obj2.name)

    def locations(self):
        "Return the locations of these items, plus their current and ancestor locations"
        from telemeta.models import Location, LocationRelation
        l = self.values('location')
        c = self.values('location__current_location')
        r = LocationRelation.objects.filter(location__in=l).values('ancestor_location')
        return Location.objects.filter(Q(pk__in=l) | Q(pk__in=r) | Q(pk__in=c))

    def countries(self, group_by_continent=False):
        "Return the countries of these items, optionally grouped by continent"
        countries = []
        from telemeta.models import Location
        for id in self.filter(location__isnull=False).values_list('location', flat=True).distinct():
            location = Location.objects.get(pk=id)
            for l in location.countries():
                c = l.current_location
                if not c in countries:
                    countries.append(c)

        if group_by_continent:
            grouped = {}

            for country in countries:
                for continent in country.continents():
                    if not grouped.has_key(continent):
                        grouped[continent] = []

                    grouped[continent].append(country)

            # Sort continents and each country list with the
            # accent-insensitive comparator, then flatten to dicts.
            keys = grouped.keys()
            keys.sort(self.__name_cmp)
            ordered = []
            for c in keys:
                grouped[c].sort(self.__name_cmp)
                ordered.append({'continent': c, 'countries': grouped[c]})

            countries = ordered
        else:
            countries.sort(self.__name_cmp)

        return countries

    def virtual(self, *args):
        "Annotate the query set with SQL-computed virtual fields"
        qs = self
        related = []
        from telemeta.models import Location
        for f in args:
            if f == 'apparent_collector':
                # The IF() construct is MySQL-specific, so it is skipped on
                # the sqlite3 and postgresql backends.
                if not 'sqlite3' in engine and not 'postgresql_psycopg2' in engine:
                    related.append('collection')
                    qs = qs.extra(select={f:
                        'IF(collector_from_collection, '
                            'IF(media_collections.collector_is_creator, '
                               'media_collections.creator, '
                               'media_collections.collector),'
                            'media_items.collector)'})
            elif f == 'country_or_continent':
                related.append('location')
                if not 'sqlite3' in engine and not 'postgresql_psycopg2' in engine:
                    qs = qs.extra(select={f:
                        'IF(locations.type = ' + str(Location.COUNTRY) + ' '
                        'OR locations.type = ' + str(Location.CONTINENT) + ','
                        'locations.name, '
                        '(SELECT l2.name FROM location_relations AS r INNER JOIN locations AS l2 '
                        'ON r.ancestor_location_id = l2.id '
                        'WHERE r.location_id = media_items.location_id AND l2.type = ' + str(Location.COUNTRY) + ' LIMIT 1))'
                    })
            else:
                raise Exception("Unsupported virtual field: %s" % f)

        if related:
            qs = qs.select_related(*related)

        return qs

    def ethnic_groups(self):
        "Return the ethnic groups referenced by these items"
        ids = self.filter(ethnic_group__isnull=False).values('ethnic_group')
        return EthnicGroup.objects.filter(pk__in=ids)

    @staticmethod
    def by_fuzzy_collector_q(pattern):
        # Q object matching the pattern against every field that may hold
        # the collector name, on the item or its collection.
        return (word_search_q('collection__creator', pattern) |
                word_search_q('collection__collector', pattern) |
                word_search_q('collector', pattern))

    def by_fuzzy_collector(self, pattern):
        "Find items by approximate collector name"
        return self.filter(self.by_fuzzy_collector_q(pattern))

    def sound(self):
        "Find items with an attached sound file or URL"
        return self.filter(Q(file__contains='/') | Q(url__contains='/'))

    def sound_public(self):
        "Find publicly accessible items with an attached sound file or URL"
        return self.filter(Q(file__contains='/') | Q(url__contains='/'),
                public_access='full', collection__public_access='full')

    def by_instrument(self, name):
        "Find items by instrument"
        from telemeta.models.instrument import Instrument, InstrumentAlias
        from telemeta.models.item import MediaItemPerformance
        instruments = Instrument.objects.filter(name__icontains=name)
        aliases = InstrumentAlias.objects.filter(name__icontains=name)
        # Filter on the performance query set directly instead of first
        # materializing it into a Python list and re-filtering on it.
        performances = MediaItemPerformance.objects.filter(
            Q(instrument__in=instruments) | Q(alias__in=aliases))
        return self.filter(performances__in=performances).distinct()

Ancestors (in MRO)

  • MediaItemQuerySet
  • telemeta.models.core.CoreQuerySet
  • telemeta.models.core.EnhancedQuerySet
  • django.db.models.query.QuerySet
  • __builtin__.object

Class variables

var value_annotation

Static methods

def by_fuzzy_collector_q(

pattern)

@staticmethod
def by_fuzzy_collector_q(pattern):
    return (word_search_q('collection__creator', pattern) |
            word_search_q('collection__collector', pattern) |
            word_search_q('collector', pattern))

Instance variables

var db

Return the database that will be used if this query is executed now

var ordered

Returns True if the QuerySet is ordered -- i.e. has an order_by() clause or a default ordering on the model.

Methods

def __init__(

self, model=None, query=None, using=None)

def __init__(self, model=None, query=None, using=None):
    self.model = model
    self._db = using
    self.query = query or sql.Query(self.model)
    self._result_cache = None
    self._sticky_filter = False
    self._for_write = False
    self._prefetch_related_lookups = []
    self._prefetch_done = False
    self._known_related_objects = {}        # {rel_field, {pk: rel_obj}}

def aggregate(

self, *args, **kwargs)

Returns a dictionary containing the calculations (aggregation) over the current queryset

If args is present the expression is passed as a kwarg using the Aggregate object's default alias.

def aggregate(self, *args, **kwargs):
    """
    Returns a dictionary containing the calculations (aggregation)
    over the current queryset
    If args is present the expression is passed as a kwarg using
    the Aggregate object's default alias.
    """
    if self.query.distinct_fields:
        raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
    for arg in args:
        kwargs[arg.default_alias] = arg
    query = self.query.clone()
    for (alias, aggregate_expr) in kwargs.items():
        query.add_aggregate(aggregate_expr, self.model, alias,
            is_summary=True)
    return query.get_aggregation(using=self.db)

def all(

self)

Returns a new QuerySet that is a copy of the current one. This allows a QuerySet to proxy for a model manager in some cases.

def all(self):
    """
    Returns a new QuerySet that is a copy of the current one. This allows a
    QuerySet to proxy for a model manager in some cases.
    """
    return self._clone()

def annotate(

self, *args, **kwargs)

Return a query set in which the returned objects have been annotated with data aggregated from related fields.

def annotate(self, *args, **kwargs):
    """
    Return a query set in which the returned objects have been annotated
    with data aggregated from related fields.
    """
    for arg in args:
        if arg.default_alias in kwargs:
            raise ValueError("The named annotation '%s' conflicts with the "
                             "default name for another annotation."
                             % arg.default_alias)
        kwargs[arg.default_alias] = arg
    names = getattr(self, '_fields', None)
    if names is None:
        names = set(self.model._meta.get_all_field_names())
    for aggregate in kwargs:
        if aggregate in names:
            raise ValueError("The annotation '%s' conflicts with a field on "
                "the model." % aggregate)
    obj = self._clone()
    obj._setup_aggregate_query(list(kwargs))
    # Add the aggregates to the query
    for (alias, aggregate_expr) in kwargs.items():
        obj.query.add_aggregate(aggregate_expr, self.model, alias,
            is_summary=False)
    return obj

def bulk_create(

self, objs, batch_size=None)

Inserts each of the instances into the database. This does not call save() on each of the instances, does not send any pre/post save signals, and does not set the primary key attribute if it is an autoincrement field.

def bulk_create(self, objs, batch_size=None):
    """
    Inserts each of the instances into the database. This does *not* call
    save() on each of the instances, does not send any pre/post save
    signals, and does not set the primary key attribute if it is an
    autoincrement field.
    """
    # So this case is fun. When you bulk insert you don't get the primary
    # keys back (if it's an autoincrement), so you can't insert into the
    # child tables which references this. There are two workarounds, 1)
    # this could be implemented if you didn't have an autoincrement pk,
    # and 2) you could do it by doing O(n) normal inserts into the parent
    # tables to get the primary keys back, and then doing a single bulk
    # insert into the childmost table. Some databases might allow doing
    # this by using RETURNING clause for the insert query. We're punting
    # on these for now because they are relatively rare cases.
    assert batch_size is None or batch_size > 0
    if self.model._meta.parents:
        raise ValueError("Can't bulk create an inherited model")
    if not objs:
        return objs
    self._for_write = True
    connection = connections[self.db]
    fields = self.model._meta.local_concrete_fields
    with transaction.commit_on_success_unless_managed(using=self.db):
        if (connection.features.can_combine_inserts_with_and_without_auto_increment_pk
            and self.model._meta.has_auto_field):
            self._batched_insert(objs, fields, batch_size)
        else:
            objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
            if objs_with_pk:
                self._batched_insert(objs_with_pk, fields, batch_size)
            if objs_without_pk:
                fields= [f for f in fields if not isinstance(f, AutoField)]
                self._batched_insert(objs_without_pk, fields, batch_size)
    return objs

def by_change_time(

self, from_time=None, until_time=None)

Find items by last change time

def by_change_time(self, from_time = None, until_time = None):
    "Find items by last change time"
    return self._by_change_time('item', from_time, until_time)

def by_fuzzy_collector(

self, pattern)

def by_fuzzy_collector(self, pattern):
    return self.filter(self.by_fuzzy_collector_q(pattern))

def by_instrument(

self, name)

Find items by instrument

def by_instrument(self, name):
    "Find items by instrument"
    from telemeta.models.instrument import Instrument, InstrumentAlias
    from telemeta.models.item import MediaItemPerformance
    instruments = Instrument.objects.filter(name__icontains=name)
    aliases = InstrumentAlias.objects.filter(name__icontains=name)
    perf = []
    performances = MediaItemPerformance.objects.filter(Q(instrument__in=instruments) | Q(alias__in=aliases))
    for performance in performances:
        perf.append(performance)
    return self.filter(performances__in=perf).distinct()

def by_location(

self, location)

Find items by location

def by_location(self, location):
    "Find items by location"
    return self.filter(location__in=location.apparented())

def by_public_id(

self, public_id)

Find items by public_id

def by_public_id(self, public_id):
    "Find items by public_id"
    return self.filter(public_id=public_id)

def by_publish_year(

self, from_year, to_year=None)

Find items by publishing year

def by_publish_year(self, from_year, to_year = None):
    "Find items by publishing year"
    if to_year is None:
        to_year = from_year
    return self.filter(collection__year_published__range=(from_year, to_year))

def by_recording_date(

self, from_date, to_date=None)

Find items by recording date

def by_recording_date(self, from_date, to_date = None):
    "Find items by recording date"
    if to_date is None:
        return (self.filter(recorded_from_date__lte=from_date, recorded_to_date__gte=from_date))
    else :
        return (self.filter(Q(recorded_from_date__range=(from_date, to_date))
                            | Q(recorded_to_date__range=(from_date, to_date))))

def by_title(

self, pattern)

Find items by title

def by_title(self, pattern):
    "Find items by title"
    # to (sort of) sync with models.media.MediaItem.get_title()
    return self.filter(word_search_q("title", pattern) |
                       (Q(title="") & word_search_q("collection__title", pattern)))

def complex_filter(

self, filter_obj)

Returns a new QuerySet instance with filter_obj added to the filters.

filter_obj can be a Q object (or anything with an add_to_query() method) or a dictionary of keyword lookup arguments.

This exists to support framework features such as 'limit_choices_to', and usually it will be more natural to use other methods.

def complex_filter(self, filter_obj):
    """
    Returns a new QuerySet instance with filter_obj added to the filters.
    filter_obj can be a Q object (or anything with an add_to_query()
    method) or a dictionary of keyword lookup arguments.
    This exists to support framework features such as 'limit_choices_to',
    and usually it will be more natural to use other methods.
    """
    if isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query'):
        clone = self._clone()
        clone.query.add_q(filter_obj)
        return clone
    else:
        return self._filter_or_exclude(None, **filter_obj)

def count(

self)

Performs a SELECT COUNT() and returns the number of records as an integer.

If the QuerySet is already fully cached this simply returns the length of the cached results set to avoid multiple SELECT COUNT(*) calls.

def count(self):
    """
    Performs a SELECT COUNT() and returns the number of records as an
    integer.
    If the QuerySet is already fully cached this simply returns the length
    of the cached results set to avoid multiple SELECT COUNT(*) calls.
    """
    if self._result_cache is not None:
        return len(self._result_cache)
    return self.query.get_count(using=self.db)

def countries(

self, group_by_continent=False)

def countries(self, group_by_continent=False):
    countries = []
    from telemeta.models import Location
    for id in self.filter(location__isnull=False).values_list('location', flat=True).distinct():
        location = Location.objects.get(pk=id)
        for l in location.countries():
            c = l.current_location
            if not c in countries:
                countries.append(c)
    if group_by_continent:
        grouped = {}
        for country in countries:
            for continent in country.continents():
                if not grouped.has_key(continent):
                    grouped[continent] = []
                grouped[continent].append(country)
        keys = grouped.keys()
        keys.sort(self.__name_cmp)
        ordered = []
        for c in keys:
            grouped[c].sort(self.__name_cmp)
            ordered.append({'continent': c, 'countries': grouped[c]})
        countries = ordered
    else:
        countries.sort(self.__name_cmp)
    return countries

def create(

self, **kwargs)

Creates a new object with the given kwargs, saving it to the database and returning the created object.

def create(self, **kwargs):
    """
    Creates a new object with the given kwargs, saving it to the database
    and returning the created object.
    """
    obj = self.model(**kwargs)
    self._for_write = True
    obj.save(force_insert=True, using=self.db)
    return obj

def dates(

self, field_name, kind, order='ASC')

Returns a list of date objects representing all available dates for the given field_name, scoped to 'kind'.

def dates(self, field_name, kind, order='ASC'):
    """
    Returns a list of date objects representing all available dates for
    the given field_name, scoped to 'kind'.
    """
    assert kind in ("year", "month", "day"), \
            "'kind' must be one of 'year', 'month' or 'day'."
    assert order in ('ASC', 'DESC'), \
            "'order' must be either 'ASC' or 'DESC'."
    return self._clone(klass=DateQuerySet, setup=True,
            _field_name=field_name, _kind=kind, _order=order)

def datetimes(

self, field_name, kind, order='ASC', tzinfo=None)

Returns a list of datetime objects representing all available datetimes for the given field_name, scoped to 'kind'.

def datetimes(self, field_name, kind, order='ASC', tzinfo=None):
    """
    Returns a list of datetime objects representing all available
    datetimes for the given field_name, scoped to 'kind'.
    """
    assert kind in ("year", "month", "day", "hour", "minute", "second"), \
            "'kind' must be one of 'year', 'month', 'day', 'hour', 'minute' or 'second'."
    assert order in ('ASC', 'DESC'), \
            "'order' must be either 'ASC' or 'DESC'."
    if settings.USE_TZ:
        if tzinfo is None:
            tzinfo = timezone.get_current_timezone()
    else:
        tzinfo = None
    return self._clone(klass=DateTimeQuerySet, setup=True,
            _field_name=field_name, _kind=kind, _order=order, _tzinfo=tzinfo)

def defer(

self, *fields)

Defers the loading of data for certain fields until they are accessed. The set of fields to defer is added to any existing set of deferred fields. The only exception to this is if None is passed in as the only parameter, in which case all deferrals are removed (None acts as a reset option).

def defer(self, *fields):
    """
    Defers the loading of data for certain fields until they are accessed.
    The set of fields to defer is added to any existing set of deferred
    fields. The only exception to this is if None is passed in as the only
    parameter, in which case all deferrals are removed (None acts as a
    reset option).
    """
    clone = self._clone()
    if fields == (None,):
        clone.query.clear_deferred_loading()
    else:
        clone.query.add_deferred_loading(fields)
    return clone

def delete(

self)

def delete(self):
    """Delete every item in this queryset, first handling rows in other
    tables that reference them, processed in chunks of CHUNK ids.

    Rows related through a WeakForeignKey have their reference set to
    NULL instead of being deleted; all other related rows are deleted.
    """
    CHUNK=1024
    # Every model that has a foreign key pointing at this model.
    objects = self.model._meta.get_all_related_objects()
    ii = self.count()
    # Lazy queryset of primary-key tuples; sliced per chunk below.
    values = self.values_list('pk')
    for related in objects:
        i = 0
        while i < ii:
            # Primary keys of the current chunk of items.
            ids = [v[0] for v in values[i:i + CHUNK]]
            # NOTE(review): 'filter' shadows the builtin of the same name.
            filter = {related.field.name + '__pk__in': ids}
            q = related.model.objects.filter(**filter)
            if isinstance(related.field, WeakForeignKey):
                # Weak references are detached (set to NULL), not cascaded.
                update = {related.field.name: None}
                q.update(**update)
            else:
                q.delete()
            i += CHUNK
    super(EnhancedQuerySet, self).delete()

def distinct(

self, *field_names)

Returns a new QuerySet instance that will select only distinct results.

def distinct(self, *field_names):
    """Return a clone that selects only distinct results."""
    assert self.query.can_filter(), \
            "Cannot create distinct fields once a slice has been taken."
    clone = self._clone()
    clone.query.add_distinct_fields(*field_names)
    return clone

def earliest(

self, field_name=None)

def earliest(self, field_name=None):
    """Return the first object when ordered ascending by field_name."""
    # Empty direction string means ascending order.
    return self._earliest_or_latest(field_name=field_name, direction="")

def ethnic_groups(

self)

def ethnic_groups(self):
    """Return the EthnicGroup objects referenced by items in this set.

    Items whose ethnic_group is NULL are ignored.
    """
    # Fixed: dropped the stray trailing semicolon (non-idiomatic Python).
    ids = self.filter(ethnic_group__isnull=False).values('ethnic_group')
    return EthnicGroup.objects.filter(pk__in=ids)

def exclude(

self, *args, **kwargs)

Returns a new QuerySet instance with NOT (args) ANDed to the existing set.

def exclude(self, *args, **kwargs):
    """Return a clone with NOT(args) ANDed onto the existing filters."""
    return self._filter_or_exclude(True, *args, **kwargs)

def exists(

self)

def exists(self):
    """Return True if this queryset matches at least one row."""
    # Prefer the already-fetched cache; otherwise ask the database.
    if self._result_cache is not None:
        return bool(self._result_cache)
    return self.query.has_results(using=self.db)

def extra(

self, select=None, where=None, params=None, tables=None, order_by=None, select_params=None)

Adds extra SQL fragments to the query.

def extra(self, select=None, where=None, params=None, tables=None,
          order_by=None, select_params=None):
    """Attach raw SQL fragments (extra SELECT columns, WHERE clauses,
    tables and ordering) to the query."""
    assert self.query.can_filter(), \
            "Cannot change a query once a slice has been taken"
    new_qs = self._clone()
    new_qs.query.add_extra(select, select_params, where, params, tables, order_by)
    return new_qs

def filter(

self, *args, **kwargs)

Returns a new QuerySet instance with the args ANDed to the existing set.

def filter(self, *args, **kwargs):
    """Return a clone with the given lookups ANDed onto the existing set."""
    return self._filter_or_exclude(False, *args, **kwargs)

def first(

self)

Returns the first object of a query, returns None if no match is found.

def first(self):
    """Return the first object of the query, or None when empty."""
    # Guarantee a deterministic ordering before taking element zero.
    ordered_qs = self.order_by('pk') if not self.ordered else self
    try:
        return ordered_qs[0]
    except IndexError:
        return None

def get(

self, *args, **kwargs)

Performs the query and returns a single object matching the given keyword arguments.

def get(self, *args, **kwargs):
    """Run the query and return the single object matching the given
    keyword arguments.

    Raises self.model.DoesNotExist when nothing matches and
    self.model.MultipleObjectsReturned when several rows do.
    """
    results = self.filter(*args, **kwargs)
    if self.query.can_filter():
        # Ordering is irrelevant for a single-object lookup.
        results = results.order_by()
    count = len(results)
    if count == 1:
        return results._result_cache[0]
    if count == 0:
        raise self.model.DoesNotExist(
            "%s matching query does not exist." %
            self.model._meta.object_name)
    raise self.model.MultipleObjectsReturned(
        "get() returned more than one %s -- it returned %s!" %
        (self.model._meta.object_name, count))

def get_or_create(

self, **kwargs)

Looks up an object with the given kwargs, creating one if necessary. Returns a tuple of (object, created), where created is a boolean specifying whether an object was created.

def get_or_create(self, **kwargs):
    """
    Looks up an object with the given kwargs, creating one if necessary.
    Returns a tuple of (object, created), where created is a boolean
    specifying whether an object was created.

    The optional 'defaults' kwarg supplies extra field values used only
    when a new object has to be created.
    """
    defaults = kwargs.pop('defaults', {})
    lookup = kwargs.copy()
    # Normalise attnames (e.g. author_id) to their field names (author).
    for f in self.model._meta.fields:
        if f.attname in lookup:
            lookup[f.name] = lookup.pop(f.attname)
    try:
        self._for_write = True
        return self.get(**lookup), False
    except self.model.DoesNotExist:
        try:
            # Drop relation-spanning lookups (containing '__') before
            # constructing the new instance.
            params = dict((k, v) for k, v in kwargs.items() if LOOKUP_SEP not in k)
            params.update(defaults)
            obj = self.model(**params)
            with transaction.atomic(using=self.db):
                obj.save(force_insert=True, using=self.db)
            return obj, True
        except DatabaseError:
            # A concurrent writer may have inserted the row; retry the get.
            exc_info = sys.exc_info()
            try:
                return self.get(**lookup), False
            except self.model.DoesNotExist:
                # Re-raise the DatabaseError with its original traceback.
                six.reraise(*exc_info)

def in_bulk(

self, id_list)

Returns a dictionary mapping each of the given IDs to the object with that ID.

def in_bulk(self, id_list):
    """
    Returns a dictionary mapping each of the given IDs to the object with
    that ID. IDs with no matching object are simply absent from the result.
    """
    assert self.query.can_filter(), \
            "Cannot use 'limit' or 'offset' with in_bulk"
    if not id_list:
        return {}
    qs = self.filter(pk__in=id_list).order_by()
    # Feed dict() a generator directly instead of materializing an
    # intermediate list of pairs first.
    return dict((obj._get_pk_val(), obj) for obj in qs)

def iterator(

self)

An iterator over the results from applying this QuerySet to the database.

def iterator(self):
    """
    An iterator over the results from applying this QuerySet to the
    database.

    Builds model instances from raw result rows, honouring only/defer
    clauses, select_related caching, extra selects, aggregates and
    known related objects.
    """
    fill_cache = False
    if connections[self.db].features.supports_select_related:
        fill_cache = self.query.select_related
    # select_related may be a bool or a dict of requested relations.
    if isinstance(fill_cache, dict):
        requested = fill_cache
    else:
        requested = None
    max_depth = self.query.max_depth
    extra_select = list(self.query.extra_select)
    aggregate_select = list(self.query.aggregate_select)
    only_load = self.query.get_loaded_field_names()
    if not fill_cache:
        fields = self.model._meta.concrete_fields
    load_fields = []
    # If only/defer clauses have been specified,
    # build the list of fields that are to be loaded.
    if only_load:
        for field, model in self.model._meta.get_concrete_fields_with_model():
            if model is None:
                model = self.model
            try:
                if field.name in only_load[model]:
                    # Add a field that has been explicitly included
                    load_fields.append(field.name)
            except KeyError:
                # Model wasn't explicitly listed in the only_load table
                # Therefore, we need to load all fields from this model
                load_fields.append(field.name)
    # Row layout: [extra selects][model fields][aggregates].
    index_start = len(extra_select)
    aggregate_start = index_start + len(load_fields or self.model._meta.concrete_fields)
    skip = None
    if load_fields and not fill_cache:
        # Some fields have been deferred, so we have to initialise
        # via keyword arguments.
        skip = set()
        init_list = []
        for field in fields:
            if field.name not in load_fields:
                skip.add(field.attname)
            else:
                init_list.append(field.attname)
        model_cls = deferred_class_factory(self.model, skip)
    # Cache db and model outside the loop
    db = self.db
    model = self.model
    compiler = self.query.get_compiler(using=db)
    if fill_cache:
        klass_info = get_klass_info(model, max_depth=max_depth,
                                    requested=requested, only_load=only_load)
    for row in compiler.results_iter():
        if fill_cache:
            obj, _ = get_cached_row(row, index_start, db, klass_info,
                                    offset=len(aggregate_select))
        else:
            # Omit aggregates in object creation.
            row_data = row[index_start:aggregate_start]
            if skip:
                obj = model_cls(**dict(zip(init_list, row_data)))
            else:
                obj = model(*row_data)
            # Store the source database of the object
            obj._state.db = db
            # This object came from the database; it's not being added.
            obj._state.adding = False
        if extra_select:
            for i, k in enumerate(extra_select):
                setattr(obj, k, row[i])
        # Add the aggregates to the model
        if aggregate_select:
            for i, aggregate in enumerate(aggregate_select):
                setattr(obj, aggregate, row[i + aggregate_start])
        # Add the known related objects to the model, if there are any
        if self._known_related_objects:
            for field, rel_objs in self._known_related_objects.items():
                # Avoid overwriting objects loaded e.g. by select_related
                if hasattr(obj, field.get_cache_name()):
                    continue
                pk = getattr(obj, field.get_attname())
                try:
                    rel_obj = rel_objs[pk]
                except KeyError:
                    pass               # may happen in qs1 | qs2 scenarios
                else:
                    setattr(obj, field.name, rel_obj)
        yield obj

def last(

self)

Returns the last object of a query, returns None if no match is found.

def last(self):
    """Return the last object of the query, or None when empty."""
    # Reuse any explicit ordering (reversed); otherwise order by -pk.
    ordered_qs = self.order_by('-pk') if not self.ordered else self.reverse()
    try:
        return ordered_qs[0]
    except IndexError:
        return None

def latest(

self, field_name=None)

def latest(self, field_name=None):
    """Return the first object when ordered descending by field_name."""
    # The '-' direction prefix means descending order.
    return self._earliest_or_latest(field_name=field_name, direction="-")

def locations(

self)

def locations(self):
    """Return every Location linked to these items: the item locations
    themselves, their ancestor locations, and current locations."""
    from telemeta.models import Location, LocationRelation
    item_locations = self.values('location')
    current_locations = self.values('location__current_location')
    ancestor_locations = LocationRelation.objects.filter(
        location__in=item_locations).values('ancestor_location')
    return Location.objects.filter(Q(pk__in=item_locations) |
                                   Q(pk__in=ancestor_locations) |
                                   Q(pk__in=current_locations))

def none(

self)

Return an empty result set

def none(self):
    """Return an empty result set."""
    # Redundant with none() in recent Django versions; the impossible
    # WHERE clause guarantees that no rows ever match.
    return self.extra(where=["0 = 1"])

def only(

self, *fields)

Essentially, the opposite of defer. Only the fields passed into this method and that are not already specified as deferred are loaded immediately when the queryset is evaluated.

def only(self, *fields):
    """Opposite of defer(): load only the given fields (plus anything
    not already deferred) immediately when the queryset is evaluated."""
    if fields == (None,):
        # Can only pass None to defer(), not only(), as the rest option.
        # That won't stop people trying to do this, so let's be explicit.
        raise TypeError("Cannot pass None as an argument to only().")
    new_qs = self._clone()
    new_qs.query.add_immediate_loading(fields)
    return new_qs

def order_by(

self, *field_names)

Returns a new QuerySet instance with the ordering changed.

def order_by(self, *field_names):
    """Return a clone whose ordering is replaced by field_names."""
    assert self.query.can_filter(), \
            "Cannot reorder a query once a slice has been taken."
    clone = self._clone()
    # Drop any previous ordering before installing the new one.
    clone.query.clear_ordering(force_empty=False)
    clone.query.add_ordering(*field_names)
    return clone

Returns a new QuerySet instance that will prefetch the specified Many-To-One and Many-To-Many related objects when the QuerySet is evaluated.

When prefetch_related() is called more than once, the list of lookups to prefetch is appended to. If prefetch_related(None) is called, the list is cleared.

Perform a quick search on code, title and collector name

def reverse(

self)

Reverses the ordering of the QuerySet.

def reverse(self):
    """Return a clone with the ordering direction inverted."""
    new_qs = self._clone()
    new_qs.query.standard_ordering = not new_qs.query.standard_ordering
    return new_qs

def select_for_update(

self, **kwargs)

Returns a new QuerySet instance that will select objects with a FOR UPDATE lock.

def select_for_update(self, **kwargs):
    """Return a clone whose rows are selected with a FOR UPDATE lock.

    Accepts an optional 'nowait' keyword argument (defaults to False,
    i.e. blocking behaviour).
    """
    nowait = kwargs.pop('nowait', False)
    clone = self._clone()
    clone._for_write = True
    clone.query.select_for_update = True
    clone.query.select_for_update_nowait = nowait
    return clone

Returns a new QuerySet instance that will select related objects.

If fields are specified, they must be ForeignKey fields and only those related objects are included in the selection.

If select_related(None) is called, the list is cleared.

def sound(

self)

def sound(self):
    """Select items that carry either a sound file path or a sound URL."""
    has_file = Q(file__contains='/')
    has_url = Q(url__contains='/')
    return self.filter(has_file | has_url)

def sound_public(

self)

def sound_public(self):
    """Select playable items whose item and collection access are 'full'."""
    playable = Q(file__contains='/') | Q(url__contains='/')
    return self.filter(playable, public_access='full',
                       collection__public_access='full')

def update(

self, **kwargs)

Updates all elements in the current QuerySet, setting all the given fields to the appropriate values.

def update(self, **kwargs):
    """
    Updates all elements in the current QuerySet, setting all the given
    fields to the appropriate values.

    Returns the row count reported by the database for the UPDATE.
    """
    assert self.query.can_filter(), \
            "Cannot update a query once a slice has been taken."
    self._for_write = True
    query = self.query.clone(sql.UpdateQuery)
    query.add_update_values(kwargs)
    with transaction.commit_on_success_unless_managed(using=self.db):
        rows = query.get_compiler(self.db).execute_sql(None)
    # Cached results are stale after the UPDATE; force a re-fetch.
    self._result_cache = None
    return rows

def using(

self, alias)

Selects which database this QuerySet should execute its query against.

def using(self, alias):
    """Select which database alias this QuerySet should execute its
    query against."""
    new_qs = self._clone()
    new_qs._db = alias
    return new_qs

def values(

self, *fields)

def values(self, *fields):
    """Return a ValuesQuerySet yielding dictionaries of the given fields."""
    return self._clone(klass=ValuesQuerySet, setup=True, _fields=fields)

def values_list(

self, *fields, **kwargs)

def values_list(self, *fields, **kwargs):
    """Return a ValuesListQuerySet yielding tuples of the given fields
    (or bare values when flat=True with a single field)."""
    flat = kwargs.pop('flat', False)
    # 'flat' is the only keyword accepted here.
    if kwargs:
        raise TypeError('Unexpected keyword arguments to values_list: %s'
                % (list(kwargs),))
    if flat and len(fields) > 1:
        raise TypeError("'flat' is not valid when values_list is called with more than one field.")
    return self._clone(klass=ValuesListQuerySet, setup=True, flat=flat,
            _fields=fields)

def virtual(

self, *args)

def virtual(self, *args):
    """Annotate the queryset with computed pseudo-fields.

    Supported names: 'apparent_collector' and 'country_or_continent'.
    Both rely on raw SQL using the IF() function and are skipped on the
    sqlite3 and postgresql_psycopg2 backends (for 'country_or_continent'
    the location relation is still select_related in that case).

    Raises Exception for any unsupported field name.
    """
    # Fixed: removed the unused local 'need_collection'.
    qs = self
    related = []
    from telemeta.models import Location
    for f in args:
        if f == 'apparent_collector':
            if not 'sqlite3' in engine and not 'postgresql_psycopg2' in engine:
                related.append('collection')
                # Resolution order: the collection's creator or collector
                # (depending on collector_is_creator), falling back to the
                # item's own collector.
                qs = qs.extra(select={f:
                    'IF(collector_from_collection, '
                        'IF(media_collections.collector_is_creator, '
                           'media_collections.creator, '
                           'media_collections.collector),'
                        'media_items.collector)'})
        elif f == 'country_or_continent':
            related.append('location')
            if not 'sqlite3' in engine and not 'postgresql_psycopg2' in engine:
                # Use the location's own name when it is a country or a
                # continent; otherwise look up its ancestor country.
                qs = qs.extra(select={f:
                    'IF(locations.type = ' + str(Location.COUNTRY) + ' '
                    'OR locations.type = ' + str(Location.CONTINENT) + ','
                    'locations.name, '
                    '(SELECT l2.name FROM location_relations AS r INNER JOIN locations AS l2 '
                    'ON r.ancestor_location_id = l2.id '
                    'WHERE r.location_id = media_items.location_id AND l2.type = ' + str(Location.COUNTRY) + ' LIMIT 1))'
                })
        else:
            raise Exception("Unsupported virtual field: %s" % f)
    if related:
        qs = qs.select_related(*related)
    return qs

def without_collection(

self)

Find items which do not belong to any collection

def without_collection(self):
    """Find items which do not belong to any collection."""
    # Fixed: dropped the stray trailing semicolon and the non-idiomatic
    # spaces around the keyword argument's '='.
    return self.extra(
        where=["collection_id NOT IN (SELECT id FROM media_collections)"])