Top

telemeta.models.collection module

# -*- coding: utf-8 -*-
# Copyright (C) 2010 Samalyse SARL
# Copyright (C) 2010-2014 Parisson SARL

# This file is part of Telemeta.

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Affero General Public License for more details.

# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
# Authors: Olivier Guilyardi <olivier@samalyse.com>
#          David LIPSZYC <davidlipszyc@gmail.com>
#          Guillaume Pellerin <yomguy@parisson.com>


from __future__ import division
from django.utils.translation import ugettext_lazy as _
from telemeta.models.core import *
from telemeta.models.query import *
from telemeta.models.identifier import *
from telemeta.models.resource import *

# Collection code patterns. Both are overridable through Django settings
# (COLLECTION_PUBLISHED_CODE_REGEX / COLLECTION_UNPUBLISHED_CODE_REGEX);
# the defaults accept any alphanumeric/._- code.
collection_published_code_regex = getattr(settings, 'COLLECTION_PUBLISHED_CODE_REGEX', '[A-Za-z0-9._-]*')
collection_unpublished_code_regex = getattr(settings, 'COLLECTION_UNPUBLISHED_CODE_REGEX', '[A-Za-z0-9._-]*')

# CREM-specific patterns, kept here for reference:
#collection_published_code_regex   = 'CNRSMH_E_[0-9]{4}(?:_[0-9]{3}){2}'
#collection_unpublished_code_regex = 'CNRSMH_I_[0-9]{4}_[0-9]{3}'

# Pattern matching either a published or an unpublished collection code.
collection_code_regex = '(?:%s|%s)' % (collection_published_code_regex,
                                       collection_unpublished_code_regex)

class MediaCollection(MediaResource):
    "Describe a collection of items"

    element_type = 'collection'

    def is_valid_collection_code(value):
        """Check if the collection code is well formed.

        Declared without ``self`` on purpose: it is referenced below as a
        plain field validator, and Django calls validators with the field
        value only. Raises ValidationError on mismatch.
        """
        regex = '^' + collection_code_regex + '$'
        if not re.match(regex, value):
            raise ValidationError(u'%s is not a valid collection code' % value)

    # General information
    title                 = CharField(_('title'), required=True)
    alt_title             = CharField(_('original title / translation'))
    creator               = CharField(_('depositor / contributor'), help_text=_('First name, Last name ; First name, Last name'))
    description           = TextField(_('description'))
    recording_context     = WeakForeignKey('RecordingContext', related_name="collections", verbose_name=_('recording context'))
    recorded_from_year    = IntegerField(_('recording year (from)'), help_text=_('YYYY'))
    recorded_to_year      = IntegerField(_('recording year (until)'), help_text=_('YYYY'))
    year_published        = IntegerField(_('year published'), help_text=_('YYYY'))
    public_access         = CharField(_('access type'), choices=PUBLIC_ACCESS_CHOICES, max_length=16, default="metadata")

    # Geographic and cultural information
    # See "countries" and "ethnic_groups" methods below

    # Legal notices
    collector             = CharField(_('recordist'), help_text=_('First name, Last name ; First name, Last name'))
    publisher             = WeakForeignKey('Publisher', related_name="collections", verbose_name=_('publisher'))
    publisher_collection  = WeakForeignKey('PublisherCollection', related_name="collections", verbose_name=_('publisher collection'))
    publisher_serial      = CharField(_('publisher serial number'))
    booklet_author        = CharField(_('booklet author'), blank=True)
    reference             = CharField(_('publisher reference'))
    external_references   = TextField(_('bibliographic references'))

    auto_period_access    = BooleanField(_('automatic access after a rolling period'), default=True)
    legal_rights          = WeakForeignKey('LegalRight', related_name="collections", verbose_name=_('legal rights'))

    # Archiving data
    code                  = CharField(_('code'), unique=True, required=True, validators=[is_valid_collection_code])
    old_code              = CharField(_('old code'), unique=False, null=True, blank=True)
    acquisition_mode      = WeakForeignKey('AcquisitionMode', related_name="collections", verbose_name=_('mode of acquisition'))
    cnrs_contributor      = CharField(_('CNRS depositor'))
    copy_type             = WeakForeignKey('CopyType', related_name="collections", verbose_name=_('copy type'))
    metadata_author       = WeakForeignKey('MetadataAuthor', related_name="collections", verbose_name=_('record author'))
    booklet_description   = TextField(_('related documentation'))
    publishing_status     = WeakForeignKey('PublishingStatus', related_name="collections", verbose_name=_('secondary edition'))
    status                = WeakForeignKey('Status', related_name="collections", verbose_name=_('collection status'))
    alt_copies            = TextField(_('copies'))
    comment               = TextField(_('comment'))
    metadata_writer       = WeakForeignKey('MetadataWriter', related_name="collections", verbose_name=_('record writer'))
    archiver_notes        = TextField(_('archiver notes'))
    items_done            = CharField(_('items finished'))
    collector_is_creator  = BooleanField(_('recordist identical to depositor'))
    is_published          = BooleanField(_('published'))
    conservation_site     = CharField(_('conservation site'))

    # Technical data
    media_type            = WeakForeignKey('MediaType', related_name="collections", verbose_name=_('media type'))
    approx_duration       = DurationField(_('estimated duration'), help_text='hh:mm:ss')
    physical_items_num    = IntegerField(_('number of components (medium / piece)'))
    original_format       = WeakForeignKey('OriginalFormat', related_name="collections", verbose_name=_('original format'))
    physical_format       = WeakForeignKey('PhysicalFormat', related_name="collections", verbose_name=_('archive format'))
    ad_conversion         = WeakForeignKey('AdConversion', related_name='collections', verbose_name=_('digitization'))

    # Obsolete fields, kept only for legacy data (see `exclude` below)
    alt_ids               = CharField(_('copies (obsolete field)'))
    travail               = CharField(_('archiver notes (obsolete field)'))

    # Default manager
    objects               = MediaCollectionManager()

    # Field names stripped from dict/row exports (see to_dict_with_more)
    exclude = ['alt_ids', 'travail']

    permissions = (("can_download_collection_epub", "Can download collection EPUB"),)

    class Meta(MetaCore):
        db_table = 'media_collections'
        ordering = ['code']
        verbose_name = _('collection')

    def __unicode__(self):
        return self.code

    def save(self, force_insert=False, force_update=False, *args, **kwargs):
        super(MediaCollection, self).save(force_insert, force_update, *args, **kwargs)

    @property
    def public_id(self):
        "Public identifier of the collection (its code)"
        return self.code

    @property
    def has_mediafile(self):
        "Tell whether this collection has any media files attached to its items"
        items = self.items.all()
        for item in items:
            if item.file:
                return True
        return False

    def __name_cmp(self, obj1, obj2):
        # Python 2 cmp-style comparator (accent-insensitive) used by the
        # list.sort(cmp) calls below.
        return unaccent_icmp(obj1.name, obj2.name)

    def countries(self):
        "Return the countries of the items"
        countries = []
        for item in self.items.filter(location__isnull=False):
            for country in item.location.countries():
                if country not in countries:
                    countries.append(country)
        countries.sort(self.__name_cmp)
        return countries
    countries.verbose_name = _("states / nations")

    def main_countries(self):
        "Return the main countries of the items (no aliases or ancestors)"
        countries = []
        for item in self.items.filter(location__isnull=False):
            if item.location not in countries:
                countries.append(item.location)
        countries.sort(self.__name_cmp)
        return countries
    main_countries.verbose_name = _("states / nations")

    def ethnic_groups(self):
        "Return the ethnic groups of the items"
        groups = []
        for item in self.items.all():
            if item.ethnic_group and item.ethnic_group not in groups:
                groups.append(item.ethnic_group)

        # Local name chosen so as not to shadow the cmp() builtin.
        compare = lambda a, b: unaccent_icmp(a.value, b.value)
        groups.sort(compare)

        return groups
    ethnic_groups.verbose_name = _('populations / social groups')

    def computed_duration(self):
        "Return the total duration summed over all items of the collection"
        duration = Duration()
        for item in self.items.all():
            duration += item.computed_duration()
        return duration
    computed_duration.verbose_name = _('computed duration')

    def computed_size(self):
        "Return the total size of a collection"
        size = 0
        for item in self.items.all():
            size += item.size()
        return size
    computed_size.verbose_name = _('collection size')

    def document_status(self):
        "Return the publication status inferred from the collection code"
        if '_I_' in self.public_id:
            return 'Unpublished'
        elif '_E_' in self.public_id:
            return 'Published'
        else:
            return 'Unknown'

    def get_url(self):
        "Return the absolute URL of the collection detail page"
        return get_full_url(reverse('telemeta-collection-detail', kwargs={'public_id': self.pk}))

    def to_dict_with_more(self):
        """Return a flat dict of the collection metadata.

        Extends to_dict() with computed values (URL, status, countries,
        ethnic groups, durations, sizes), per-related-media entries and the
        first identifier, while dropping obsolete fields listed in `exclude`.
        """
        metadata = self.to_dict()
        for key in self.exclude:
            if key in metadata:
                del metadata[key]

        metadata['url'] = get_full_url(reverse('telemeta-collection-detail', kwargs={'public_id': self.pk}))
        metadata['doc_status'] = self.document_status()
        metadata['countries'] = ';'.join([location.name for location in self.main_countries()])
        metadata['ethnic_groups'] = ';'.join([group.value for group in self.ethnic_groups()])
        revision = self.get_revision()
        if revision:
            metadata['last_modification_date'] = unicode(revision.time)
        metadata['computed_duration'] = unicode(self.computed_duration())
        metadata['computed_size'] = unicode(self.computed_size())
        metadata['number_of_items'] = unicode(self.items.all().count())
        metadata['approx_duration'] = unicode(self.approx_duration)

        # One title/url pair per related medium.
        # BUGFIX: the fallback branch used to be an unreachable
        # "elif media.url" (same condition as the "if", with `tag` unbound);
        # media without an explicit URL now fall back to the
        # collection-related view URL as originally intended.
        for i, media in enumerate(self.related.all()):
            metadata['related_media_title' + '_' + str(i)] = media.title
            tag = 'related_media_url' + '_' + str(i)
            if media.url:
                metadata[tag] = media.url
            else:
                metadata[tag] = get_full_url(reverse('telemeta-collection-related',
                                             kwargs={'public_id': self.public_id, 'media_id': media.id}))

        # Export the first identifier only
        identifiers = self.identifiers.all()
        if identifiers:
            identifier = identifiers[0]
            metadata['identifier_id'] = identifier.identifier
            metadata['identifier_type'] = identifier.type
            metadata['identifier_date'] = unicode(identifier.date_last)
            metadata['identifier_note'] = identifier.notes

        return metadata

    def get_json(self):
        "Return the collection metadata serialized as a JSON string"
        import json
        return json.dumps(self.to_dict_with_more())

    def to_row(self, tags):
        "Return a list of metadata values in the order given by `tags` ('' when missing)"
        row = []
        _dict = self.to_dict_with_more()
        for tag in tags:
            if tag in _dict:
                row.append(_dict[tag])
            else:
                row.append('')
        return row


class MediaCollectionRelated(MediaRelated):
    """Collection related media.

    Media (images, documents, links) attached to a collection; reachable
    from the collection through the `related` reverse relation.
    """

    collection      = ForeignKey('MediaCollection', related_name="related", verbose_name=_('collection'))

    class Meta(MetaCore):
        db_table = 'media_collection_related'
        verbose_name = _('collection related media')
        verbose_name_plural = _('collection related media')


class MediaCollectionIdentifier(Identifier):
    """Collection identifier.

    External identifier (e.g. DOI-like reference) attached to a collection;
    reachable through the `identifiers` reverse relation. A given identifier
    value may be attached to a given collection only once.
    """

    collection = ForeignKey(MediaCollection, related_name="identifiers", verbose_name=_('collection'))

    class Meta(MetaCore):
        db_table = 'media_collection_identifier'
        verbose_name = _('collection identifier')
        verbose_name_plural = _('collection identifiers')
        unique_together = ('identifier', 'collection')

Module variables

var PUBLIC_ACCESS_CHOICES

var app_name

var code_linesep

var collection_code_regex

var collection_published_code_regex

var collection_unpublished_code_regex

var default_decoding

var default_encoding

var engine

var eol

var ext

var mime_type

var private_extra_types

var public_extra_types

var resource_code_regex

var strict_code

Classes

class MediaCollection

Describe a collection of items

class MediaCollection(MediaResource):
    "Describe a collection of items"

    element_type = 'collection'

    def is_valid_collection_code(value):
        "Check if the collection code is well formed"
        # NOTE: deliberately declared without self — referenced below as a
        # plain field validator, which Django calls with the value only.
        regex = '^' + collection_code_regex + '$'
        if not re.match(regex, value):
            raise ValidationError(u'%s is not a valid collection code' % value)

    # General informations
    title                 = CharField(_('title'), required=True)
    alt_title             = CharField(_('original title / translation'))
    creator               = CharField(_('depositor / contributor'), help_text=_('First name, Last name ; First name, Last name'))
    description           = TextField(_('description'))
    recording_context     = WeakForeignKey('RecordingContext', related_name="collections", verbose_name=_('recording context'))
    recorded_from_year    = IntegerField(_('recording year (from)'), help_text=_('YYYY'))
    recorded_to_year      = IntegerField(_('recording year (until)'), help_text=_('YYYY'))
    year_published        = IntegerField(_('year published'), help_text=_('YYYY'))
    public_access         = CharField(_('access type'), choices=PUBLIC_ACCESS_CHOICES, max_length=16, default="metadata")

    # Geographic and cultural informations
    # See "countries" and "ethnic_groups" methods below

    # Legal notices
    collector             = CharField(_('recordist'), help_text=_('First name, Last name ; First name, Last name'))
    publisher             = WeakForeignKey('Publisher', related_name="collections", verbose_name=_('publisher'))
    publisher_collection  = WeakForeignKey('PublisherCollection', related_name="collections", verbose_name=_('publisher collection'))
    publisher_serial      = CharField(_('publisher serial number'))
    booklet_author        = CharField(_('booklet author'), blank=True)
    reference             = CharField(_('publisher reference'))
    external_references   = TextField(_('bibliographic references'))

    auto_period_access    = BooleanField(_('automatic access after a rolling period'), default=True)
    legal_rights          = WeakForeignKey('LegalRight', related_name="collections", verbose_name=_('legal rights'))

    # Archiving data
    code                  = CharField(_('code'), unique=True, required=True, validators=[is_valid_collection_code])
    old_code              = CharField(_('old code'), unique=False, null=True, blank=True)
    acquisition_mode      = WeakForeignKey('AcquisitionMode', related_name="collections", verbose_name=_('mode of acquisition'))
    cnrs_contributor      = CharField(_('CNRS depositor'))
    copy_type             = WeakForeignKey('CopyType', related_name="collections", verbose_name=_('copy type'))
    metadata_author       = WeakForeignKey('MetadataAuthor', related_name="collections", verbose_name=_('record author'))
    booklet_description   = TextField(_('related documentation'))
    publishing_status     = WeakForeignKey('PublishingStatus', related_name="collections", verbose_name=_('secondary edition'))
    status                = WeakForeignKey('Status', related_name="collections", verbose_name=_('collection status'))
    alt_copies            = TextField(_('copies'))
    comment               = TextField(_('comment'))
    metadata_writer       = WeakForeignKey('MetadataWriter', related_name="collections", verbose_name=_('record writer'))
    archiver_notes        = TextField(_('archiver notes'))
    items_done            = CharField(_('items finished'))
    collector_is_creator  = BooleanField(_('recordist identical to depositor'))
    is_published          = BooleanField(_('published'))
    conservation_site     = CharField(_('conservation site'))

    # Technical data
    media_type            = WeakForeignKey('MediaType', related_name="collections", verbose_name=_('media type'))
    approx_duration       = DurationField(_('estimated duration'), help_text='hh:mm:ss')
    physical_items_num    = IntegerField(_('number of components (medium / piece)'))
    original_format       = WeakForeignKey('OriginalFormat', related_name="collections", verbose_name=_('original format'))
    physical_format       = WeakForeignKey('PhysicalFormat', related_name="collections", verbose_name=_('archive format'))
    ad_conversion         = WeakForeignKey('AdConversion', related_name='collections', verbose_name=_('digitization'))

    # No more used old fields (kept for legacy data; see `exclude` below)
    alt_ids               = CharField(_('copies (obsolete field)'))
    travail               = CharField(_('archiver notes (obsolete field)'))

    # All
    objects               = MediaCollectionManager()

    # Field names stripped from dict/row exports (see to_dict_with_more)
    exclude = ['alt_ids', 'travail']

    permissions = (("can_download_collection_epub", "Can download collection EPUB"),)

    class Meta(MetaCore):
        db_table = 'media_collections'
        ordering = ['code']
        verbose_name = _('collection')

    def __unicode__(self):
        return self.code

    def save(self, force_insert=False, force_update=False, *args, **kwargs):
        super(MediaCollection, self).save(force_insert, force_update, *args, **kwargs)

    @property
    def public_id(self):
        # Public identifier of the collection (its code).
        return self.code

    @property
    def has_mediafile(self):
        "Tell whether this collection has any media files attached to its items"
        items = self.items.all()
        for item in items:
            if item.file:
                return True
        return False

    def __name_cmp(self, obj1, obj2):
        # Python 2 cmp-style, accent-insensitive comparator used by the
        # list.sort(cmp) calls below.
        return unaccent_icmp(obj1.name, obj2.name)

    def countries(self):
        "Return the countries of the items"
        countries = []
        for item in self.items.filter(location__isnull=False):
            for country in item.location.countries():
                if not country in countries:
                    countries.append(country)
        countries.sort(self.__name_cmp)
        return countries
    countries.verbose_name = _("states / nations")

    def main_countries(self):
        "Return the main countries of the items (no aliases or ancestors)"
        countries = []
        for item in self.items.filter(location__isnull=False):
            if not item.location in countries:
                countries.append(item.location)
        countries.sort(self.__name_cmp)
        return countries
    main_countries.verbose_name = _("states / nations")

    def ethnic_groups(self):
        "Return the ethnic groups of the items"
        groups = []
        items = self.items.all()
        for item in items:
            if item.ethnic_group and not item.ethnic_group in groups:
                groups.append(item.ethnic_group)

        # NOTE(review): this local shadows the cmp() builtin.
        cmp = lambda a, b: unaccent_icmp(a.value, b.value)
        groups.sort(cmp)

        return groups
    ethnic_groups.verbose_name = _('populations / social groups')

    def computed_duration(self):
        # Total duration summed over all items of the collection.
        duration = Duration()
        for item in self.items.all():
            duration += item.computed_duration()
        return duration
    computed_duration.verbose_name = _('computed duration')

    def computed_size(self):
        "Return the total size of a collection"
        size = 0
        for item in self.items.all():
            size += item.size()
        return size
    computed_size.verbose_name = _('collection size')

    def document_status(self):
        # Publication status inferred from markers in the collection code.
        if '_I_' in self.public_id:
            return 'Unpublished'
        elif '_E_' in self.public_id:
            return 'Published'
        else:
            return 'Unknown'

    def get_url(self):
        # Absolute URL of the collection detail page.
        return get_full_url(reverse('telemeta-collection-detail', kwargs={'public_id':self.pk}))

    def to_dict_with_more(self):
        """Return a flat dict of the collection metadata plus computed values
        (URL, status, countries, ethnic groups, durations, sizes, related
        media and the first identifier), minus obsolete fields in `exclude`.
        """
        # metadata = model_to_dict(self, fields=[], exclude=self.exclude)
        metadata = self.to_dict()
        for key in self.exclude:
            if key in metadata.keys():
                del metadata[key]

        metadata['url'] = get_full_url(reverse('telemeta-collection-detail', kwargs={'public_id':self.pk}))
        metadata['doc_status'] = self.document_status()
        metadata['countries'] = ';'.join([location.name for location in self.main_countries()])
        metadata['ethnic_groups'] = ';'.join([group.value for group in self.ethnic_groups()])
        revision = self.get_revision()
        if revision:
            metadata['last_modification_date'] = unicode(revision.time)
        metadata['computed_duration'] = unicode(self.computed_duration())
        metadata['computed_size'] = unicode(self.computed_size())
        metadata['number_of_items'] = unicode(self.items.all().count())
        metadata['approx_duration'] = unicode(self.approx_duration)

        i = 0
        for media in self.related.all():
            metadata['related_media_title' + '_' + str(i)] = media.title
            if media.url:
                tag = 'related_media_url' + '_' + str(i)
                metadata[tag] = media.url
            # NOTE(review): the branch below repeats the condition above, so
            # it can never run, and `tag` would be unbound if it did — the
            # intent was likely a plain `else` fallback to the view URL.
            elif media.url:
                metadata[tag] = get_full_url(reverse('telemeta-collection-related',
                                            kwargs={'public_id': self.public_id, 'media_id': media.id}))
            i += 1

        # One ID only
        identifiers = self.identifiers.all()
        if identifiers:
            identifier = identifiers[0]
            metadata['identifier_id'] = identifier.identifier
            metadata['identifier_type'] = identifier.type
            metadata['identifier_date'] = unicode(identifier.date_last)
            metadata['identifier_note'] = identifier.notes

        # All IDs
        # i = 0
        # for indentifier in self.identifiers.all():
        #     metadata['identifier' + '_' + str(i)] = identifier.identifier
        #     metadata['identifier_type' + '_' + str(i)] = identifier.type
        #     metadata['identifier_date_last' + '_' + str(i)] = unicode(identifier.date_last)
        #     metadata['identifier_notes' + '_' + str(i)] = identifier.notes
        #     i += 1

        return metadata

    def get_json(self):
        # JSON serialization of to_dict_with_more().
        import json
        return json.dumps(self.to_dict_with_more())

    def to_row(self, tags):
        # List of metadata values in `tags` order; '' when a tag is missing.
        row = []
        _dict = self.to_dict_with_more()
        for tag in tags:
            if tag in _dict.keys():
                row.append(_dict[tag])
            else:
                row.append('')
        return row

Ancestors (in MRO)

  • MediaCollection
  • telemeta.models.resource.MediaResource
  • telemeta.models.core.ModelCore
  • telemeta.models.core.EnhancedModel
  • django.db.models.base.Model
  • dirtyfields.dirtyfields.DirtyFieldsMixin
  • __builtin__.object

Class variables

var DoesNotExist

var ENABLE_M2M_CHECK

var Meta

var MultipleObjectsReturned

var acquisition_mode

var ad_conversion

var alt_copies

var alt_ids

var alt_title

var approx_duration

var archiver_notes

var auto_period_access

var booklet_author

var booklet_description

var cnrs_contributor

var code

var collector

var collector_is_creator

var comment

var compare_function

var conservation_site

var copy_type

var corpus

var creator

var description

var element_type

var exclude

var external_references

var identifiers

var is_published

var items

var items_done

var legal_rights

var media_type

var metadata_author

var metadata_writer

var objects

var old_code

var original_format

var permissions

var physical_format

var physical_items_num

var public_access

var publisher

var publisher_collection

var publisher_serial

var publishing_status

var recorded_from_year

var recorded_to_year

var recording_context

var reference

var related

var status

var title

var travail

var year_published

Static methods

def get_dom_field_name(

field_name)

Convert an underscore-separated field name to a camelCase DOM element name

@staticmethod
def get_dom_field_name(field_name):
    "Map an underscore-separated field name to its camelCase DOM element name"
    words = field_name.split('_')
    return words[0] + ''.join(w[0].upper() + w[1:] for w in words[1:])

Instance variables

var has_mediafile

Tell whether this collection has any media files attached to its items

var pk

var public_id

Methods

def __init__(

self, *args, **kwargs)

def __init__(self, *args, **kwargs):
    """Initialize a model instance from positional and/or keyword arguments.

    Positional args fill concrete fields in declaration order; keyword args
    may address fields by attname or (for related objects) by name. Fires
    the pre_init/post_init signals around initialization.
    """
    signals.pre_init.send(sender=self.__class__, args=args, kwargs=kwargs)
    # Set up the storage for instance state
    self._state = ModelState()
    # There is a rather weird disparity here; if kwargs, it's set, then args
    # overrides it. It should be one or the other; don't duplicate the work
    # The reason for the kwargs check is that standard iterator passes in by
    # args, and instantiation for iteration is 33% faster.
    args_len = len(args)
    if args_len > len(self._meta.concrete_fields):
        # Daft, but matches old exception sans the err msg.
        raise IndexError("Number of args exceeds number of fields")
    if not kwargs:
        # Fast path: positional-only construction over concrete fields.
        fields_iter = iter(self._meta.concrete_fields)
        # The ordering of the zip calls matter - zip throws StopIteration
        # when an iter throws it. So if the first iter throws it, the second
        # is *not* consumed. We rely on this, so don't change the order
        # without changing the logic.
        for val, field in zip(args, fields_iter):
            setattr(self, field.attname, val)
    else:
        # Slower, kwargs-ready version.
        fields_iter = iter(self._meta.fields)
        for val, field in zip(args, fields_iter):
            setattr(self, field.attname, val)
            kwargs.pop(field.name, None)
            # Maintain compatibility with existing calls.
            if isinstance(field.rel, ManyToOneRel):
                kwargs.pop(field.attname, None)
    # Now we're left with the unprocessed fields that *must* come from
    # keywords, or default.
    for field in fields_iter:
        is_related_object = False
        # This slightly odd construct is so that we can access any
        # data-descriptor object (DeferredAttribute) without triggering its
        # __get__ method.
        if (field.attname not in kwargs and
                (isinstance(self.__class__.__dict__.get(field.attname), DeferredAttribute)
                 or field.column is None)):
            # This field will be populated on request.
            continue
        if kwargs:
            if isinstance(field.rel, ForeignObjectRel):
                try:
                    # Assume object instance was passed in.
                    rel_obj = kwargs.pop(field.name)
                    is_related_object = True
                except KeyError:
                    try:
                        # Object instance wasn't passed in -- must be an ID.
                        val = kwargs.pop(field.attname)
                    except KeyError:
                        val = field.get_default()
                else:
                    # Object instance was passed in. Special case: You can
                    # pass in "None" for related objects if it's allowed.
                    if rel_obj is None and field.null:
                        val = None
            else:
                try:
                    val = kwargs.pop(field.attname)
                except KeyError:
                    # This is done with an exception rather than the
                    # default argument on pop because we don't want
                    # get_default() to be evaluated, and then not used.
                    # Refs #12057.
                    val = field.get_default()
        else:
            val = field.get_default()
        if is_related_object:
            # If we are passed a related instance, set it using the
            # field.name instead of field.attname (e.g. "user" instead of
            # "user_id") so that the object gets properly cached (and type
            # checked) by the RelatedObjectDescriptor.
            setattr(self, field.name, rel_obj)
        else:
            setattr(self, field.attname, val)
    if kwargs:
        # Leftover kwargs may target properties; anything else is an error.
        for prop in list(kwargs):
            try:
                if isinstance(getattr(self.__class__, prop), property):
                    setattr(self, prop, kwargs.pop(prop))
            except AttributeError:
                pass
        if kwargs:
            raise TypeError("'%s' is an invalid keyword argument for this function" % list(kwargs)[0])
    super(Model, self).__init__()
    signals.post_init.send(sender=self.__class__, instance=self)

def clean(

self)

Hook for doing any extra model-wide validation after clean() has been called on every field by self.clean_fields. Any ValidationError raised by this method will not be associated with a particular field; it will have a special-case association with the field defined by NON_FIELD_ERRORS.

def clean(self):
    """
    Hook for doing any extra model-wide validation after clean() has been
    called on every field by self.clean_fields. Any ValidationError raised
    by this method will not be associated with a particular field; it will
    have a special-case association with the field defined by NON_FIELD_ERRORS.
    """
    # Default implementation is intentionally a no-op; subclasses override.
    pass

def clean_fields(

self, exclude=None)

Cleans all fields and raises a ValidationError containing message_dict of all validation errors if any occur.

def clean_fields(self, exclude=None):
    """
    Cleans all fields and raises a ValidationError containing message_dict
    of all validation errors if any occur.

    `exclude` is an optional list of field names to skip entirely.
    """
    if exclude is None:
        exclude = []
    # Accumulate per-field error lists so all failures are reported at once.
    errors = {}
    for f in self._meta.fields:
        if f.name in exclude:
            continue
        # Skip validation for empty fields with blank=True. The developer
        # is responsible for making sure they have a valid value.
        raw_value = getattr(self, f.attname)
        if f.blank and raw_value in f.empty_values:
            continue
        try:
            # Assign the cleaned value back so the instance holds it.
            setattr(self, f.attname, f.clean(raw_value, self))
        except ValidationError as e:
            errors[f.name] = e.error_list
    if errors:
        raise ValidationError(errors)

def computed_duration(

self)

def computed_duration(self):
    "Return the total duration summed over all items of the collection"
    duration = Duration()
    for item in self.items.all():
        duration += item.computed_duration()
    return duration

def computed_size(

self)

Return the total size of a collection

def computed_size(self):
    "Return the total size of a collection"
    # Sum of the sizes of every item; 0 for an empty collection.
    size = 0
    for item in self.items.all():
        size += item.size()
    return size

def countries(

self)

Return the countries of the items

def countries(self):
    "Return the countries of the items"
    # Collect unique countries over located items, then sort with the
    # Python 2 cmp-style accent-insensitive name comparator.
    countries = []
    for item in self.items.filter(location__isnull=False):
        for country in item.location.countries():
            if not country in countries:
                countries.append(country)
    countries.sort(self.__name_cmp)
    return countries

def date_error_message(

self, lookup_type, field, unique_for)

def date_error_message(self, lookup_type, field, unique_for):
    """Build the localized "must be unique for <date>" message.

    :param lookup_type: lookup word interpolated into the message
        (per Django's unique_for_* checks — presumably 'date', 'month'
        or 'year').
    :param field: name of the field whose uniqueness failed.
    :param unique_for: name of the date field the constraint refers to.
    """
    opts = self._meta
    return _("%(field_name)s must be unique for %(date_field)s %(lookup)s.") % {
        'field_name': six.text_type(capfirst(opts.get_field(field).verbose_name)),
        'date_field': six.text_type(capfirst(opts.get_field(unique_for).verbose_name)),
        'lookup': lookup_type,
    }

def delete(

self)

def delete(self):
    "Delete the persisted row for this instance (requires a primary key)"
    if self.pk:
        self.__class__.objects.filter(pk=self.pk).delete()
    else:
        raise Exception("Can't delete without a primary key")

def document_status(

self)

def document_status(self):
    "Derive publication status from the '_I_'/'_E_' marker in the public id"
    code = self.public_id
    if '_I_' in code:
        return 'Unpublished'
    if '_E_' in code:
        return 'Published'
    return 'Unknown'

def ethnic_groups(

self)

Return the ethnic groups of the items

def ethnic_groups(self):
    """Return the distinct ethnic groups of the items, sorted by value.

    Sorting uses a Python 2 cmp function delegating to unaccent_icmp
    (accent-insensitive comparison helper defined elsewhere).
    """
    groups = []
    items = self.items.all()
    for item in items:
        if item.ethnic_group and not item.ethnic_group in groups:
            groups.append(item.ethnic_group)
    # Python 2 cmp-style sort comparing the groups' .value attributes.
    # (The local name shadows the builtin cmp.)
    cmp = lambda a, b: unaccent_icmp(a.value, b.value)
    groups.sort(cmp)
    return groups

def field_label(

cls, field_name=None)

@classmethod
def field_label(cls, field_name=None):
    """Return a human-readable label for a field, or the model's own label.

    Falls back from the DB field's verbose_name, to a class attribute's
    verbose_name, to the raw field name when nothing better exists.
    """
    if field_name:
        try:
            return cls._meta.get_field(field_name).verbose_name
        except FieldDoesNotExist:
            try:
                # Not a DB field: maybe a class attribute carrying its
                # own verbose_name.
                return getattr(cls, field_name).verbose_name
            except AttributeError:
                return field_name
    else:
        return cls._meta.verbose_name

def full_clean(

self, exclude=None, validate_unique=True)

Calls clean_fields, clean, and validate_unique, on the model, and raises a ValidationError for any errors that occurred.

def full_clean(self, exclude=None, validate_unique=True):
    """
    Calls clean_fields, clean, and validate_unique, on the model,
    and raises a ``ValidationError`` for any errors that occurred.

    :param exclude: field names skipped by clean_fields and, transitively,
        by the unique checks.
    :param validate_unique: when False, unique checks are skipped entirely.
    """
    errors = {}
    if exclude is None:
        exclude = []
    try:
        self.clean_fields(exclude=exclude)
    except ValidationError as e:
        errors = e.update_error_dict(errors)
    # Form.clean() is run even if other validation fails, so do the
    # same with Model.clean() for consistency.
    try:
        self.clean()
    except ValidationError as e:
        errors = e.update_error_dict(errors)
    # Run unique checks, but only for fields that passed validation.
    if validate_unique:
        for name in errors.keys():
            if name != NON_FIELD_ERRORS and name not in exclude:
                exclude.append(name)
        try:
            self.validate_unique(exclude=exclude)
        except ValidationError as e:
            errors = e.update_error_dict(errors)
    if errors:
        raise ValidationError(errors)

def get_dirty_fields(

self, check_relationship=False, check_m2m=None, verbose=False)

def get_dirty_fields(self, check_relationship=False, check_m2m=None, verbose=False):
    """Return a dict of fields whose values differ from their saved state.

    :param check_relationship: also compare foreign-key fields.
    :param check_m2m: optional current m2m state to compare against the
        original (requires ENABLE_M2M_CHECK to be set on the class).
    :param verbose: when True, keep the full comparison payload instead
        of just each field's saved value.
    """
    if self._state.adding:
        # If the object has not yet been saved in the database, all fields are considered dirty
        # for consistency (see https://github.com/romgar/django-dirtyfields/issues/65 for more details)
        pk_specified = self.pk is not None
        initial_dict = self._as_dict(check_relationship, include_primary_key=pk_specified)
        return initial_dict
    if check_m2m is not None and not self.ENABLE_M2M_CHECK:
        raise ValueError("You can't check m2m fields if ENABLE_M2M_CHECK is set to False")
    modified_fields = compare_states(self._as_dict(check_relationship),
                                     self._original_state,
                                     self.compare_function)
    if check_m2m:
        modified_m2m_fields = compare_states(check_m2m,
                                             self._original_m2m_state,
                                             self.compare_function)
        modified_fields.update(modified_m2m_fields)
    if not verbose:
        # Keeps backward compatibility with previous function return
        modified_fields = {key: value['saved'] for key, value in modified_fields.items()}
    return modified_fields

def get_dom_name(

cls)

Convert the class name to a DOM element name

@classmethod
def get_dom_name(cls):
    "Convert the class name to a DOM element name"
    name = cls.__name__
    return name[0].lower() + name[1:]

def get_json(

self)

def get_json(self):
    "Serialize the extended metadata dictionary to a JSON string"
    import json
    payload = self.to_dict_with_more()
    return json.dumps(payload)

def get_public_access_display(

*moreargs, **morekwargs)

def _curried(*moreargs, **morekwargs):
    # NOTE(review): pdoc rendering artifact — this is the inner closure of
    # Django's curry() backing get_public_access_display(); 'args',
    # 'kwargs' and '_curried_func' are free variables bound in the
    # enclosing scope, not visible in this chunk.
    return _curried_func(*(args + moreargs), **dict(kwargs, **morekwargs))

def get_revision(

self)

def get_revision(self):
    "Return the most recent Revision for this element, or None when absent"
    queryset = Revision.objects.filter(element_type=self.element_type, element_id=self.id).order_by('-time')
    if not queryset:
        return None
    return queryset[0]

def get_url(

self)

def get_url(self):
    # Absolute URL of the collection detail page. Note: the route is
    # reversed with the primary key as 'public_id' — presumably the URL
    # pattern accepts either; confirm against urls.py.
    return get_full_url(reverse('telemeta-collection-detail', kwargs={'public_id':self.pk}))

def is_dirty(

self, check_relationship=False, check_m2m=None)

def is_dirty(self, check_relationship=False, check_m2m=None):
    "Return True when any tracked field differs from its saved state"
    dirty = self.get_dirty_fields(check_relationship=check_relationship,
                                  check_m2m=check_m2m)
    return bool(dirty)

def is_valid_collection_code(

value)

Check if the collection code is well formed

def is_valid_collection_code(value):
    """Check if the collection code is well formed.

    Used as a field validator, hence no 'self' argument. The pattern is
    the module-level, settings-driven collection_code_regex anchored at
    both ends.

    :raises ValidationError: when the value does not fully match.
    """
    regex = '^' + collection_code_regex + '$'
    if not re.match(regex, value):
        raise ValidationError(u'%s is not a valid collection code' % value)

def main_countries(

self)

Return the main countries of the items (no aliases or ancestors)

def main_countries(self):
    "Return the main countries of the items (no aliases or ancestors)"
    countries = []
    # Deduplicate the items' direct locations, preserving first-seen order.
    for item in self.items.filter(location__isnull=False):
        if not item.location in countries:
            countries.append(item.location)
    # Python 2 cmp-style sort with the class's private name comparator
    # (defined outside this chunk).
    countries.sort(self.__name_cmp)
    return countries

def prepare_database_save(

self, unused)

def prepare_database_save(self, unused):
    "Resolve this instance to its primary key for use inside an ORM query"
    pk = self.pk
    if pk is not None:
        return pk
    raise ValueError("Unsaved model instance %r cannot be used in an ORM query." % self)

def public_access_label(

self)

def public_access_label(self):
    "Human-readable label for this collection's public_access level"
    access = self.public_access
    if access == 'full':
        return _('Sound and metadata')
    if access == 'metadata':
        return _('Metadata only')
    return _('Private data')

def required_fields(

cls)

@classmethod
def required_fields(cls):
    "List every model field whose blank flag is False"
    return [field for field in cls._meta.fields if not field.blank]

def save(

self, force_insert=False, force_update=False, *args, **kwargs)

def save(self, force_insert=False, force_update=False, *args, **kwargs):
    # Pure passthrough to the parent save() — NOTE(review): presumably
    # kept as an explicit local hook point; adds no behavior of its own.
    super(MediaCollection, self).save(force_insert, force_update, *args, **kwargs)

def save_base(

self, raw=False, force_insert=False, force_update=False, using=None, update_fields=None)

Handles the parts of saving which should be done only once per save, yet need to be done in raw saves, too. This includes some sanity checks and signal sending.

The 'raw' argument is telling save_base not to save any parent models and not to do any changes to the values before save. This is used by fixture loading.

def save_base(self, raw=False, force_insert=False,
              force_update=False, using=None, update_fields=None):
    """
    Handles the parts of saving which should be done only once per save,
    yet need to be done in raw saves, too. This includes some sanity
    checks and signal sending.
    The 'raw' argument is telling save_base not to save any parent
    models and not to do any changes to the values before save. This
    is used by fixture loading.
    """
    using = using or router.db_for_write(self.__class__, instance=self)
    assert not (force_insert and (force_update or update_fields))
    assert update_fields is None or len(update_fields) > 0
    cls = origin = self.__class__
    # Skip proxies, but keep the origin as the proxy model.
    if cls._meta.proxy:
        cls = cls._meta.concrete_model
    meta = cls._meta
    # pre_save/post_save signals are suppressed for auto-created models.
    if not meta.auto_created:
        signals.pre_save.send(sender=origin, instance=self, raw=raw, using=using,
                              update_fields=update_fields)
    # Parent-table and own-table writes happen in one atomic block.
    with transaction.commit_on_success_unless_managed(using=using, savepoint=False):
        if not raw:
            self._save_parents(cls, using, update_fields)
        updated = self._save_table(raw, cls, force_insert, force_update, using, update_fields)
    # Store the database on which the object was saved
    self._state.db = using
    # Once saved, this is no longer a to-be-added instance.
    self._state.adding = False
    # Signal that the save is complete
    if not meta.auto_created:
        signals.post_save.send(sender=origin, instance=self, created=(not updated),
                               update_fields=update_fields, raw=raw, using=using)

def save_dirty_fields(

self)

def save_dirty_fields(self):
    # Persist only the modified columns (django-dirtyfields helper);
    # FK changes are included via check_relationship=True.
    dirty_fields = self.get_dirty_fields(check_relationship=True)
    save_specific_fields(self, dirty_fields)

def serializable_value(

self, field_name)

Returns the value of the field name for this instance. If the field is a foreign key, returns the id value, instead of the object. If there's no Field object with this name on the model, the model attribute's value is returned directly.

Used to serialize a field's value (in the serializer, or form output, for example). Normally, you would just access the attribute directly and not use this method.

def serializable_value(self, field_name):
    """
    Returns the value of the field name for this instance. If the field is
    a foreign key, returns the id value, instead of the object. If there's
    no Field object with this name on the model, the model attribute's
    value is returned directly.
    Used to serialize a field's value (in the serializer, or form output,
    for example). Normally, you would just access the attribute directly
    and not use this method.
    """
    try:
        # get_field_by_name returns a tuple; element 0 is the field.
        field = self._meta.get_field_by_name(field_name)[0]
    except FieldDoesNotExist:
        return getattr(self, field_name)
    # attname is the column-bearing name (e.g. 'author_id' for 'author').
    return getattr(self, field.attname)

def set_revision(

self, user)

Save a media object and add a revision

def set_revision(self, user):
    "Save a media object and add a revision"
    # Delegates to Revision.touch (defined elsewhere) to record the
    # change attributed to 'user'.
    Revision.touch(self, user)

def to_dict(

self)

Return model fields as a dict of name/value pairs

def to_dict(self):
    "Return model fields as a dict of name/value pairs"
    return dict((field.name, getattr(self, field.name))
                for field in self._meta.fields)

def to_dict_with_more(

self)

def to_dict_with_more(self):
    """Return the full metadata dict used for exports.

    Extends to_dict() with computed values (durations, size, item count),
    the absolute URL, related-media entries and the first identifier.
    Keys listed in self.exclude are removed first.
    """
    # metadata = model_to_dict(self, fields=[], exclude=self.exclude)
    metadata = self.to_dict()
    for key in self.exclude:
        if key in metadata.keys():
            del metadata[key]
    metadata['url'] = get_full_url(reverse('telemeta-collection-detail', kwargs={'public_id':self.pk}))
    metadata['doc_status'] = self.document_status()
    metadata['countries'] = ';'.join([location.name for location in self.main_countries()])
    metadata['ethnic_groups'] = ';'.join([group.value for group in self.ethnic_groups()])
    revision = self.get_revision()
    if revision:
        metadata['last_modification_date'] = unicode(revision.time)
    metadata['computed_duration'] = unicode(self.computed_duration())
    metadata['computed_size'] = unicode(self.computed_size())
    metadata['number_of_items'] = unicode(self.items.all().count())
    metadata['approx_duration'] = unicode(self.approx_duration)
    i = 0
    for media in self.related.all():
        metadata['related_media_title' + '_' + str(i)] = media.title
        # BUG FIX: this used to read "elif media.url:", which duplicated
        # the condition above it (so the branch could never run) and
        # referenced 'tag' before assignment. Media without an explicit
        # URL now fall back to the internal related-media route.
        tag = 'related_media_url' + '_' + str(i)
        if media.url:
            metadata[tag] = media.url
        else:
            metadata[tag] = get_full_url(reverse('telemeta-collection-related',
                                        kwargs={'public_id': self.public_id, 'media_id': media.id}))
        i += 1
    # One ID only
    identifiers = self.identifiers.all()
    if identifiers:
        identifier = identifiers[0]
        metadata['identifier_id'] = identifier.identifier
        metadata['identifier_type'] = identifier.type
        metadata['identifier_date'] = unicode(identifier.date_last)
        metadata['identifier_note'] = identifier.notes
    # All IDs
    # i = 0
    # for identifier in self.identifiers.all():
    #     metadata['identifier' + '_' + str(i)] = identifier.identifier
    #     metadata['identifier_type' + '_' + str(i)] = identifier.type
    #     metadata['identifier_date_last' + '_' + str(i)] = unicode(identifier.date_last)
    #     metadata['identifier_notes' + '_' + str(i)] = identifier.notes
    #     i += 1
    return metadata

def to_dom(

self)

Return the DOM representation of this media object

def to_dom(self):
    """Return the DOM representation of this media object.

    Builds a <className id="pk"> document with one camelCase child
    element per model field; values that are models also carry a 'key'
    attribute holding their primary key. (Python 2: iteritems/unicode.)
    """
    impl = getDOMImplementation()
    root = self.get_dom_name()
    doc = impl.createDocument(None, root, None)
    top = doc.documentElement
    top.setAttribute("id", str(self.pk))
    fields = self.to_dict()
    for name, value in fields.iteritems():
        element = doc.createElement(self.get_dom_field_name(name))
        if isinstance(value, EnhancedModel):
            # Related model: keep its pk as an attribute alongside the text.
            element.setAttribute('key', str(value.pk))
        value = unicode(value)
        element.appendChild(doc.createTextNode(value))
        top.appendChild(element)
    return doc

def to_list(

self)

Return model fields as a list

def to_list(self):
    "Return model fields as a list"
    return [{'name': field.name, 'value': unicode(getattr(self, field.name))}
            for field in self._meta.fields]

def to_row(

self, tags)

def to_row(self, tags):
    "Map each requested tag to its metadata value, '' when the tag is absent"
    data = self.to_dict_with_more()
    return [data[tag] if tag in data else '' for tag in tags]

def unique_error_message(

self, model_class, unique_check)

def unique_error_message(self, model_class, unique_check):
    """Build the user-facing message for a failed uniqueness check.

    :param model_class: model owning the constraint.
    :param unique_check: tuple of field names; length 1 means a single
        unique field, otherwise a unique_together constraint.
    """
    opts = model_class._meta
    model_name = capfirst(opts.verbose_name)
    # A unique field
    if len(unique_check) == 1:
        field_name = unique_check[0]
        field = opts.get_field(field_name)
        field_label = capfirst(field.verbose_name)
        # Insert the error into the error dict, very sneaky
        return field.error_messages['unique'] % {
            'model_name': six.text_type(model_name),
            'field_label': six.text_type(field_label)
        }
    # unique_together
    else:
        field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check]
        field_labels = get_text_list(field_labels, _('and'))
        return _("%(model_name)s with this %(field_label)s already exists.") % {
            'model_name': six.text_type(model_name),
            'field_label': six.text_type(field_labels)
        }

def validate_unique(

self, exclude=None)

Checks unique constraints on the model and raises ValidationError if any failed.

def validate_unique(self, exclude=None):
    """
    Checks unique constraints on the model and raises ``ValidationError``
    if any failed.

    :param exclude: field names excluded from the uniqueness checks.
    """
    unique_checks, date_checks = self._get_unique_checks(exclude=exclude)
    errors = self._perform_unique_checks(unique_checks)
    date_errors = self._perform_date_checks(date_checks)
    # Merge date-based errors into the per-field error dict.
    for k, v in date_errors.items():
        errors.setdefault(k, []).extend(v)
    if errors:
        raise ValidationError(errors)

class MediaCollectionIdentifier

Collection identifier

class MediaCollectionIdentifier(Identifier):
    """Collection identifier.

    Attaches an external identifier record to a MediaCollection; a given
    (identifier, collection) pair is stored only once.
    """

    # Reverse accessor: collection.identifiers
    collection = ForeignKey(MediaCollection, related_name="identifiers", verbose_name=_('collection'))

    class Meta(MetaCore):
        db_table = 'media_collection_identifier'
        verbose_name = _('collection identifier')
        verbose_name_plural = _('collection identifiers')
        unique_together = ('identifier', 'collection')

Ancestors (in MRO)

  • MediaCollectionIdentifier
  • telemeta.models.identifier.Identifier
  • telemeta.models.core.ModelCore
  • telemeta.models.core.EnhancedModel
  • django.db.models.base.Model
  • dirtyfields.dirtyfields.DirtyFieldsMixin
  • __builtin__.object

Class variables

var DoesNotExist

var ENABLE_M2M_CHECK

var Meta

var MultipleObjectsReturned

var collection

var compare_function

var objects

var type

Static methods

def get_dom_field_name(

field_name)

Convert the class name to a DOM element name

@staticmethod
def get_dom_field_name(field_name):
    "Convert a snake_case field name to a camelCase DOM element name"
    parts = field_name.split('_')
    return parts[0] + ''.join(p[0].upper() + p[1:] for p in parts[1:])

Instance variables

var pk

Methods

def __init__(

self, *args, **kwargs)

def __init__(self, *args, **kwargs):
    """Django Model.__init__: populate field values from args/kwargs.

    Positional args map onto concrete fields in declaration order;
    keyword args are popped per field, with remaining fields taking
    their defaults. Leftover kwargs must match properties or a
    TypeError is raised. pre_init/post_init signals wrap the body.
    """
    signals.pre_init.send(sender=self.__class__, args=args, kwargs=kwargs)
    # Set up the storage for instance state
    self._state = ModelState()
    # There is a rather weird disparity here; if kwargs, it's set, then args
    # overrides it. It should be one or the other; don't duplicate the work
    # The reason for the kwargs check is that standard iterator passes in by
    # args, and instantiation for iteration is 33% faster.
    args_len = len(args)
    if args_len > len(self._meta.concrete_fields):
        # Daft, but matches old exception sans the err msg.
        raise IndexError("Number of args exceeds number of fields")
    if not kwargs:
        fields_iter = iter(self._meta.concrete_fields)
        # The ordering of the zip calls matter - zip throws StopIteration
        # when an iter throws it. So if the first iter throws it, the second
        # is *not* consumed. We rely on this, so don't change the order
        # without changing the logic.
        for val, field in zip(args, fields_iter):
            setattr(self, field.attname, val)
    else:
        # Slower, kwargs-ready version.
        fields_iter = iter(self._meta.fields)
        for val, field in zip(args, fields_iter):
            setattr(self, field.attname, val)
            kwargs.pop(field.name, None)
            # Maintain compatibility with existing calls.
            if isinstance(field.rel, ManyToOneRel):
                kwargs.pop(field.attname, None)
    # Now we're left with the unprocessed fields that *must* come from
    # keywords, or default.
    for field in fields_iter:
        is_related_object = False
        # This slightly odd construct is so that we can access any
        # data-descriptor object (DeferredAttribute) without triggering its
        # __get__ method.
        if (field.attname not in kwargs and
                (isinstance(self.__class__.__dict__.get(field.attname), DeferredAttribute)
                 or field.column is None)):
            # This field will be populated on request.
            continue
        if kwargs:
            if isinstance(field.rel, ForeignObjectRel):
                try:
                    # Assume object instance was passed in.
                    rel_obj = kwargs.pop(field.name)
                    is_related_object = True
                except KeyError:
                    try:
                        # Object instance wasn't passed in -- must be an ID.
                        val = kwargs.pop(field.attname)
                    except KeyError:
                        val = field.get_default()
                else:
                    # Object instance was passed in. Special case: You can
                    # pass in "None" for related objects if it's allowed.
                    if rel_obj is None and field.null:
                        val = None
            else:
                try:
                    val = kwargs.pop(field.attname)
                except KeyError:
                    # This is done with an exception rather than the
                    # default argument on pop because we don't want
                    # get_default() to be evaluated, and then not used.
                    # Refs #12057.
                    val = field.get_default()
        else:
            val = field.get_default()
        if is_related_object:
            # If we are passed a related instance, set it using the
            # field.name instead of field.attname (e.g. "user" instead of
            # "user_id") so that the object gets properly cached (and type
            # checked) by the RelatedObjectDescriptor.
            setattr(self, field.name, rel_obj)
        else:
            setattr(self, field.attname, val)
    if kwargs:
        # Anything left must be a settable property, otherwise it's an error.
        for prop in list(kwargs):
            try:
                if isinstance(getattr(self.__class__, prop), property):
                    setattr(self, prop, kwargs.pop(prop))
            except AttributeError:
                pass
        if kwargs:
            raise TypeError("'%s' is an invalid keyword argument for this function" % list(kwargs)[0])
    super(Model, self).__init__()
    signals.post_init.send(sender=self.__class__, instance=self)

def clean(

self)

Hook for doing any extra model-wide validation after clean() has been called on every field by self.clean_fields. Any ValidationError raised by this method will not be associated with a particular field; it will have a special-case association with the field defined by NON_FIELD_ERRORS.

def clean(self):
    """
    Hook for extra model-wide validation, run after clean_fields() has
    validated every individual field. Errors raised here are attached to
    NON_FIELD_ERRORS rather than to a specific field. Default: no-op.
    """
    pass

def clean_fields(

self, exclude=None)

Cleans all fields and raises a ValidationError containing message_dict of all validation errors if any occur.

def clean_fields(self, exclude=None):
    """
    Cleans all fields and raises a ValidationError containing message_dict
    of all validation errors if any occur.

    :param exclude: optional iterable of field names that are not cleaned.
    :raises ValidationError: keyed by field name.
    """
    if exclude is None:
        exclude = []
    errors = {}
    for f in self._meta.fields:
        if f.name in exclude:
            continue
        # Skip validation for empty fields with blank=True. The developer
        # is responsible for making sure they have a valid value.
        raw_value = getattr(self, f.attname)
        if f.blank and raw_value in f.empty_values:
            continue
        try:
            # Store the cleaned (validated + normalized) value back.
            setattr(self, f.attname, f.clean(raw_value, self))
        except ValidationError as e:
            errors[f.name] = e.error_list
    if errors:
        raise ValidationError(errors)

def date_error_message(

self, lookup_type, field, unique_for)

def date_error_message(self, lookup_type, field, unique_for):
    """Localized message for a unique_for_date/month/year violation.

    :param lookup_type: lookup word interpolated verbatim into the message.
    :param field: field whose uniqueness failed.
    :param unique_for: date field the constraint is scoped to.
    """
    opts = self._meta
    return _("%(field_name)s must be unique for %(date_field)s %(lookup)s.") % {
        'field_name': six.text_type(capfirst(opts.get_field(field).verbose_name)),
        'date_field': six.text_type(capfirst(opts.get_field(unique_for).verbose_name)),
        'lookup': lookup_type,
    }

def delete(

self)

def delete(self):
    # Refuse to delete unsaved instances, then delete through a
    # pk-filtered queryset.
    if not self.pk:
        raise Exception("Can't delete without a primary key")
    self.__class__.objects.filter(pk=self.pk).delete()

def field_label(

cls, field_name=None)

@classmethod
def field_label(cls, field_name=None):
    """Human-readable label for a field, or the model's verbose name.

    Resolution order: DB field verbose_name → class attribute's
    verbose_name → the raw field name itself.
    """
    if field_name:
        try:
            return cls._meta.get_field(field_name).verbose_name
        except FieldDoesNotExist:
            try:
                return getattr(cls, field_name).verbose_name
            except AttributeError:
                return field_name
    else:
        return cls._meta.verbose_name

def full_clean(

self, exclude=None, validate_unique=True)

Calls clean_fields, clean, and validate_unique, on the model, and raises a ValidationError for any errors that occurred.

def full_clean(self, exclude=None, validate_unique=True):
    """
    Calls clean_fields, clean, and validate_unique, on the model,
    and raises a ``ValidationError`` for any errors that occurred.

    Fields that already failed clean_fields are added to ``exclude`` so
    the unique checks only consider fields that passed validation.
    """
    errors = {}
    if exclude is None:
        exclude = []
    try:
        self.clean_fields(exclude=exclude)
    except ValidationError as e:
        errors = e.update_error_dict(errors)
    # Form.clean() is run even if other validation fails, so do the
    # same with Model.clean() for consistency.
    try:
        self.clean()
    except ValidationError as e:
        errors = e.update_error_dict(errors)
    # Run unique checks, but only for fields that passed validation.
    if validate_unique:
        for name in errors.keys():
            if name != NON_FIELD_ERRORS and name not in exclude:
                exclude.append(name)
        try:
            self.validate_unique(exclude=exclude)
        except ValidationError as e:
            errors = e.update_error_dict(errors)
    if errors:
        raise ValidationError(errors)

def get_dirty_fields(

self, check_relationship=False, check_m2m=None, verbose=False)

def get_dirty_fields(self, check_relationship=False, check_m2m=None, verbose=False):
    """Dict of fields whose current values differ from the saved state.

    Unsaved instances report every field as dirty. m2m comparison
    requires ENABLE_M2M_CHECK; non-verbose mode keeps only each field's
    saved value for backward compatibility.
    """
    if self._state.adding:
        # If the object has not yet been saved in the database, all fields are considered dirty
        # for consistency (see https://github.com/romgar/django-dirtyfields/issues/65 for more details)
        pk_specified = self.pk is not None
        initial_dict = self._as_dict(check_relationship, include_primary_key=pk_specified)
        return initial_dict
    if check_m2m is not None and not self.ENABLE_M2M_CHECK:
        raise ValueError("You can't check m2m fields if ENABLE_M2M_CHECK is set to False")
    modified_fields = compare_states(self._as_dict(check_relationship),
                                     self._original_state,
                                     self.compare_function)
    if check_m2m:
        modified_m2m_fields = compare_states(check_m2m,
                                             self._original_m2m_state,
                                             self.compare_function)
        modified_fields.update(modified_m2m_fields)
    if not verbose:
        # Keeps backward compatibility with previous function return
        modified_fields = {key: value['saved'] for key, value in modified_fields.items()}
    return modified_fields

def get_dom_name(

cls)

Convert the class name to a DOM element name

@classmethod
def get_dom_name(cls):
    "Lower-case the first letter of the class name to form a DOM tag name"
    head, tail = cls.__name__[0], cls.__name__[1:]
    return head.lower() + tail

def is_dirty(

self, check_relationship=False, check_m2m=None)

def is_dirty(self, check_relationship=False, check_m2m=None):
    "True when get_dirty_fields() reports at least one modification"
    return len(self.get_dirty_fields(check_relationship=check_relationship,
                                     check_m2m=check_m2m)) > 0

def prepare_database_save(

self, unused)

def prepare_database_save(self, unused):
    "Return the primary key so the ORM can reference this saved instance"
    if self.pk is not None:
        return self.pk
    raise ValueError("Unsaved model instance %r cannot be used in an ORM query." % self)

def required_fields(

cls)

@classmethod
def required_fields(cls):
    "Collect the model fields that may not be left blank"
    return list(filter(lambda f: not f.blank, cls._meta.fields))

def save(

self, force_insert=False, force_update=False, *args, **kwargs)

def save(self, force_insert=False, force_update=False, *args, **kwargs):
    """Validate required fields, then persist through the parent save().

    :raises RequiredFieldError: when any blank=False field holds a falsy
        value. NOTE(review): the falsy check also rejects legitimate
        0/False values — confirm that's intended.
    """
    required = self.required_fields()
    for field in required:
        if not getattr(self, field.name):
            raise RequiredFieldError(self, field)
    super(ModelCore, self).save(force_insert, force_update, *args, **kwargs)

def save_base(

self, raw=False, force_insert=False, force_update=False, using=None, update_fields=None)

Handles the parts of saving which should be done only once per save, yet need to be done in raw saves, too. This includes some sanity checks and signal sending.

The 'raw' argument is telling save_base not to save any parent models and not to do any changes to the values before save. This is used by fixture loading.

def save_base(self, raw=False, force_insert=False,
              force_update=False, using=None, update_fields=None):
    """
    Handles the parts of saving which should be done only once per save,
    yet need to be done in raw saves, too. This includes some sanity
    checks and signal sending.
    The 'raw' argument is telling save_base not to save any parent
    models and not to do any changes to the values before save. This
    is used by fixture loading.
    """
    using = using or router.db_for_write(self.__class__, instance=self)
    assert not (force_insert and (force_update or update_fields))
    assert update_fields is None or len(update_fields) > 0
    cls = origin = self.__class__
    # Skip proxies, but keep the origin as the proxy model.
    if cls._meta.proxy:
        cls = cls._meta.concrete_model
    meta = cls._meta
    # Signals are skipped for auto-created (e.g. m2m through) models.
    if not meta.auto_created:
        signals.pre_save.send(sender=origin, instance=self, raw=raw, using=using,
                              update_fields=update_fields)
    # Parent and own table writes share one atomic block.
    with transaction.commit_on_success_unless_managed(using=using, savepoint=False):
        if not raw:
            self._save_parents(cls, using, update_fields)
        updated = self._save_table(raw, cls, force_insert, force_update, using, update_fields)
    # Store the database on which the object was saved
    self._state.db = using
    # Once saved, this is no longer a to-be-added instance.
    self._state.adding = False
    # Signal that the save is complete
    if not meta.auto_created:
        signals.post_save.send(sender=origin, instance=self, created=(not updated),
                               update_fields=update_fields, raw=raw, using=using)

def save_dirty_fields(

self)

def save_dirty_fields(self):
    # Persist only the columns flagged dirty (django-dirtyfields helper),
    # including FK changes via check_relationship=True.
    dirty_fields = self.get_dirty_fields(check_relationship=True)
    save_specific_fields(self, dirty_fields)

def serializable_value(

self, field_name)

Returns the value of the field name for this instance. If the field is a foreign key, returns the id value, instead of the object. If there's no Field object with this name on the model, the model attribute's value is returned directly.

Used to serialize a field's value (in the serializer, or form output, for example). Normally, you would just access the attribute directly and not use this method.

def serializable_value(self, field_name):
    """
    Returns the value of the field name for this instance. If the field is
    a foreign key, returns the id value, instead of the object. If there's
    no Field object with this name on the model, the model attribute's
    value is returned directly.
    Used to serialize a field's value (in the serializer, or form output,
    for example). Normally, you would just access the attribute directly
    and not use this method.
    """
    try:
        # get_field_by_name returns a tuple; element 0 is the field object.
        field = self._meta.get_field_by_name(field_name)[0]
    except FieldDoesNotExist:
        return getattr(self, field_name)
    # attname holds the column-level name (e.g. 'owner_id' for 'owner').
    return getattr(self, field.attname)

def to_dict(

self)

Return model fields as a dict of name/value pairs

def to_dict(self):
    "Return model fields as a dict of name/value pairs"
    pairs = [(field.name, getattr(self, field.name)) for field in self._meta.fields]
    return dict(pairs)

def to_dom(

self)

Return the DOM representation of this media object

def to_dom(self):
    "Return the DOM representation of this media object"
    # NOTE: Python 2 only -- relies on dict.iteritems() and unicode().
    impl = getDOMImplementation()
    # Root element name is derived from the class name (see get_dom_name).
    root = self.get_dom_name()
    doc = impl.createDocument(None, root, None)
    top = doc.documentElement
    top.setAttribute("id", str(self.pk))
    fields = self.to_dict()
    for name, value in fields.iteritems():
        element = doc.createElement(self.get_dom_field_name(name))
        if isinstance(value, EnhancedModel):
            # Related model instances are referenced by primary key.
            element.setAttribute('key', str(value.pk))
        value = unicode(value)
        element.appendChild(doc.createTextNode(value))
        top.appendChild(element)
    return doc

def to_list(

self)

Return model fields as a list

def to_list(self):
    "Return model fields as a list"
    return [{'name': field.name,
             'value': unicode(getattr(self, field.name))}
            for field in self._meta.fields]

def unique_error_message(

self, model_class, unique_check)

def unique_error_message(self, model_class, unique_check):
    """Build the user-facing message for a unique-constraint violation.

    ``unique_check`` is a tuple of field names: one name for a plain
    ``unique=True`` field, several names for ``unique_together``.
    """
    opts = model_class._meta
    model_name = capfirst(opts.verbose_name)
    # A unique field
    if len(unique_check) == 1:
        field_name = unique_check[0]
        field = opts.get_field(field_name)
        field_label = capfirst(field.verbose_name)
        # Insert the error into the error dict, very sneaky
        return field.error_messages['unique'] % {
            'model_name': six.text_type(model_name),
            'field_label': six.text_type(field_label)
        }
    # unique_together
    else:
        field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check]
        field_labels = get_text_list(field_labels, _('and'))
        return _("%(model_name)s with this %(field_label)s already exists.") % {
            'model_name': six.text_type(model_name),
            'field_label': six.text_type(field_labels)
        }

def validate_unique(

self, exclude=None)

Checks unique constraints on the model and raises ValidationError if any failed.

def validate_unique(self, exclude=None):
    """
    Check every unique constraint on the model and raise a
    ``ValidationError`` if any of them failed.
    """
    unique_checks, date_checks = self._get_unique_checks(exclude=exclude)
    errors = self._perform_unique_checks(unique_checks)
    for key, messages in self._perform_date_checks(date_checks).items():
        errors.setdefault(key, []).extend(messages)
    if errors:
        raise ValidationError(errors)

class MediaCollectionRelated

Collection related media

class MediaCollectionRelated(MediaRelated):
    "Collection related media"

    # Media attached to a collection; reverse accessor is
    # collection.related (see related_name below).
    collection      = ForeignKey('MediaCollection', related_name="related", verbose_name=_('collection'))

    class Meta(MetaCore):
        db_table = 'media_collection_related'
        verbose_name = _('collection related media')
        verbose_name_plural = _('collection related media')

Ancestors (in MRO)

  • MediaCollectionRelated
  • telemeta.models.resource.MediaRelated
  • telemeta.models.resource.MediaResource
  • telemeta.models.core.ModelCore
  • telemeta.models.core.EnhancedModel
  • django.db.models.base.Model
  • dirtyfields.dirtyfields.DirtyFieldsMixin
  • __builtin__.object

Class variables

var DoesNotExist

var ENABLE_M2M_CHECK

var Meta

var MultipleObjectsReturned

var collection

var compare_function

var element_type

var objects

Static methods

def get_dom_field_name(

field_name)

Convert the class name to a DOM element name

@staticmethod
def get_dom_field_name(field_name):
    """Convert a snake_case field name to a camelCase DOM element name.

    Example: 'recorded_from_date' -> 'recordedFromDate'.

    Fix: empty tokens produced by doubled or trailing underscores are
    now skipped instead of raising IndexError (the previous code
    indexed t[0] unconditionally).
    """
    tokens = field_name.split('_')
    # t[:1] is safe on an empty token, unlike t[0].
    return tokens[0] + ''.join(t[:1].upper() + t[1:] for t in tokens[1:])

Instance variables

var pk

Methods

def __init__(

self, *args, **kwargs)

def __init__(self, *args, **kwargs):
    """
    Django ``Model.__init__``: populate the instance's fields from
    positional args (matching ``_meta`` field order) and/or keyword
    args, firing the ``pre_init`` and ``post_init`` signals around the
    work.  Raises IndexError for too many positional args and TypeError
    for an unknown keyword argument.
    """
    signals.pre_init.send(sender=self.__class__, args=args, kwargs=kwargs)
    # Set up the storage for instance state
    self._state = ModelState()
    # There is a rather weird disparity here; if kwargs, it's set, then args
    # overrides it. It should be one or the other; don't duplicate the work
    # The reason for the kwargs check is that standard iterator passes in by
    # args, and instantiation for iteration is 33% faster.
    args_len = len(args)
    if args_len > len(self._meta.concrete_fields):
        # Daft, but matches old exception sans the err msg.
        raise IndexError("Number of args exceeds number of fields")
    if not kwargs:
        # Fast path: positional-only construction.
        fields_iter = iter(self._meta.concrete_fields)
        # The ordering of the zip calls matter - zip throws StopIteration
        # when an iter throws it. So if the first iter throws it, the second
        # is *not* consumed. We rely on this, so don't change the order
        # without changing the logic.
        for val, field in zip(args, fields_iter):
            setattr(self, field.attname, val)
    else:
        # Slower, kwargs-ready version.
        fields_iter = iter(self._meta.fields)
        for val, field in zip(args, fields_iter):
            setattr(self, field.attname, val)
            kwargs.pop(field.name, None)
            # Maintain compatibility with existing calls.
            if isinstance(field.rel, ManyToOneRel):
                kwargs.pop(field.attname, None)
    # Now we're left with the unprocessed fields that *must* come from
    # keywords, or default.
    for field in fields_iter:
        is_related_object = False
        # This slightly odd construct is so that we can access any
        # data-descriptor object (DeferredAttribute) without triggering its
        # __get__ method.
        if (field.attname not in kwargs and
                (isinstance(self.__class__.__dict__.get(field.attname), DeferredAttribute)
                 or field.column is None)):
            # This field will be populated on request.
            continue
        if kwargs:
            if isinstance(field.rel, ForeignObjectRel):
                try:
                    # Assume object instance was passed in.
                    rel_obj = kwargs.pop(field.name)
                    is_related_object = True
                except KeyError:
                    try:
                        # Object instance wasn't passed in -- must be an ID.
                        val = kwargs.pop(field.attname)
                    except KeyError:
                        val = field.get_default()
                else:
                    # Object instance was passed in. Special case: You can
                    # pass in "None" for related objects if it's allowed.
                    if rel_obj is None and field.null:
                        val = None
            else:
                try:
                    val = kwargs.pop(field.attname)
                except KeyError:
                    # This is done with an exception rather than the
                    # default argument on pop because we don't want
                    # get_default() to be evaluated, and then not used.
                    # Refs #12057.
                    val = field.get_default()
        else:
            val = field.get_default()
        if is_related_object:
            # If we are passed a related instance, set it using the
            # field.name instead of field.attname (e.g. "user" instead of
            # "user_id") so that the object gets properly cached (and type
            # checked) by the RelatedObjectDescriptor.
            setattr(self, field.name, rel_obj)
        else:
            setattr(self, field.attname, val)
    if kwargs:
        # Leftover kwargs may target properties; anything else is an error.
        for prop in list(kwargs):
            try:
                if isinstance(getattr(self.__class__, prop), property):
                    setattr(self, prop, kwargs.pop(prop))
            except AttributeError:
                pass
        if kwargs:
            raise TypeError("'%s' is an invalid keyword argument for this function" % list(kwargs)[0])
    super(Model, self).__init__()
    signals.post_init.send(sender=self.__class__, instance=self)

def clean(

self)

Hook for doing any extra model-wide validation after clean() has been called on every field by self.clean_fields. Any ValidationError raised by this method will not be associated with a particular field; it will have a special-case association with the field defined by NON_FIELD_ERRORS.

def clean(self):
    """
    Hook for model-wide validation, invoked after clean_fields() has
    run on every field.  A ValidationError raised here is not tied to
    any single field; it is filed under NON_FIELD_ERRORS.  The default
    implementation is a no-op meant to be overridden.
    """

def clean_fields(

self, exclude=None)

Cleans all fields and raises a ValidationError containing message_dict of all validation errors if any occur.

def clean_fields(self, exclude=None):
    """
    Clean every field and raise a ValidationError carrying the
    message_dict of all per-field validation errors, if any occurred.
    Fields listed in ``exclude`` are skipped entirely.
    """
    if exclude is None:
        exclude = []
    errors = {}
    for field in self._meta.fields:
        if field.name in exclude:
            continue
        raw_value = getattr(self, field.attname)
        # blank=True fields may legitimately be empty; the developer is
        # responsible for their value, so skip validation for them.
        if field.blank and raw_value in field.empty_values:
            continue
        try:
            setattr(self, field.attname, field.clean(raw_value, self))
        except ValidationError as err:
            errors[field.name] = err.error_list
    if errors:
        raise ValidationError(errors)

def date_error_message(

self, lookup_type, field, unique_for)

def date_error_message(self, lookup_type, field, unique_for):
    """Build the message for a ``unique_for_date``-style violation.

    ``field`` and ``unique_for`` are field names on this model;
    ``lookup_type`` is interpolated verbatim into the message
    (presumably 'date', 'month' or 'year' -- TODO confirm against the
    caller, _perform_date_checks).
    """
    opts = self._meta
    return _("%(field_name)s must be unique for %(date_field)s %(lookup)s.") % {
        'field_name': six.text_type(capfirst(opts.get_field(field).verbose_name)),
        'date_field': six.text_type(capfirst(opts.get_field(unique_for).verbose_name)),
        'lookup': lookup_type,
    }

def delete(

self)

def delete(self):
    "Delete this instance from the database; requires a saved primary key."
    if not self.pk:
        raise Exception("Can't delete without a primary key")
    queryset = self.__class__.objects.filter(pk=self.pk)
    queryset.delete()

def field_label(

cls, field_name=None)

@classmethod
def field_label(cls, field_name=None):
    """Return a human-readable label for ``field_name``, or the model's
    own verbose name when no field name is given.  Falls back to the
    raw name when nothing better is available."""
    if not field_name:
        return cls._meta.verbose_name
    try:
        return cls._meta.get_field(field_name).verbose_name
    except FieldDoesNotExist:
        try:
            # Not a model field; maybe a property carrying verbose_name.
            return getattr(cls, field_name).verbose_name
        except AttributeError:
            return field_name

def full_clean(

self, exclude=None, validate_unique=True)

Calls clean_fields, clean, and validate_unique, on the model, and raises a ValidationError for any errors that occurred.

def full_clean(self, exclude=None, validate_unique=True):
    """
    Calls clean_fields, clean, and validate_unique, on the model,
    and raises a ``ValidationError`` for any errors that occurred.
    Errors from all three stages are merged into one dict before
    raising.
    """
    errors = {}
    if exclude is None:
        exclude = []
    try:
        self.clean_fields(exclude=exclude)
    except ValidationError as e:
        errors = e.update_error_dict(errors)
    # Form.clean() is run even if other validation fails, so do the
    # same with Model.clean() for consistency.
    try:
        self.clean()
    except ValidationError as e:
        errors = e.update_error_dict(errors)
    # Run unique checks, but only for fields that passed validation.
    if validate_unique:
        # Fields that already failed are added to `exclude` so the
        # unique checks do not double-report them.
        for name in errors.keys():
            if name != NON_FIELD_ERRORS and name not in exclude:
                exclude.append(name)
        try:
            self.validate_unique(exclude=exclude)
        except ValidationError as e:
            errors = e.update_error_dict(errors)
    if errors:
        raise ValidationError(errors)

def get_dirty_fields(

self, check_relationship=False, check_m2m=None, verbose=False)

def get_dirty_fields(self, check_relationship=False, check_m2m=None, verbose=False):
    """Return a dict of field name -> saved value for every field whose
    in-memory value differs from ``self._original_state``.

    When ``verbose`` is True, each entry keeps the full comparison dict
    produced by compare_states() instead of just the saved value.
    """
    if self._state.adding:
        # If the object has not yet been saved in the database, all fields are considered dirty
        # for consistency (see https://github.com/romgar/django-dirtyfields/issues/65 for more details)
        pk_specified = self.pk is not None
        initial_dict = self._as_dict(check_relationship, include_primary_key=pk_specified)
        return initial_dict
    if check_m2m is not None and not self.ENABLE_M2M_CHECK:
        raise ValueError("You can't check m2m fields if ENABLE_M2M_CHECK is set to False")
    modified_fields = compare_states(self._as_dict(check_relationship),
                                     self._original_state,
                                     self.compare_function)
    if check_m2m:
        # Caller supplies the current m2m state to compare against.
        modified_m2m_fields = compare_states(check_m2m,
                                             self._original_m2m_state,
                                             self.compare_function)
        modified_fields.update(modified_m2m_fields)
    if not verbose:
        # Keeps backward compatibility with previous function return
        modified_fields = {key: value['saved'] for key, value in modified_fields.items()}
    return modified_fields

def get_dom_name(

cls)

Convert the class name to a DOM element name

@classmethod
def get_dom_name(cls):
    "Convert the class name to a DOM element name"
    name = cls.__name__
    return name[:1].lower() + name[1:]

def get_revision(

self)

def get_revision(self):
    "Return the most recent Revision for this element, or None if there is none."
    history = Revision.objects.filter(element_type=self.element_type,
                                      element_id=self.id).order_by('-time')
    return history[0] if history else None

def is_dirty(

self, check_relationship=False, check_m2m=None)

def is_dirty(self, check_relationship=False, check_m2m=None):
    "Tell whether any field differs from its last saved state."
    dirty = self.get_dirty_fields(check_relationship=check_relationship,
                                  check_m2m=check_m2m)
    return dirty != {}

def is_image(

self)

def is_image(self):
    """Tell whether this related media is an image.

    True when the MIME type contains 'image', or when the URL contains
    a known image extension.  Behaviour is unchanged from the original:
    only all-lowercase or all-uppercase extensions match, anywhere in
    the URL (not just at its end).

    Fix: the original shadowed the builtin ``type`` as a loop variable
    and never short-circuited; rewritten with any().
    """
    extensions = ('.png', '.jpg', '.gif', '.jpeg')
    is_url_image = bool(self.url) and any(
        ext in self.url or ext.upper() in self.url for ext in extensions)
    return 'image' in self.mime_type or is_url_image

def is_kdenlive_session(

self)

def is_kdenlive_session(self):
    "Tell whether the attached file looks like a Kdenlive session."
    return bool(self.file) and '.kdenlive' in self.file.path

def prepare_database_save(

self, unused)

def prepare_database_save(self, unused):
    "Return the primary key so the instance can stand in for a value in an ORM query."
    if self.pk is not None:
        return self.pk
    raise ValueError("Unsaved model instance %r cannot be used in an ORM query." % self)

def public_access_label(

self)

def public_access_label(self):
    "Return a human-readable label for the public_access level."
    access = self.public_access
    if access == 'full':
        return _('Sound and metadata')
    if access == 'metadata':
        return _('Metadata only')
    return _('Private data')

def required_fields(

cls)

@classmethod
def required_fields(cls):
    "Return the model fields that may not be left blank."
    return [field for field in cls._meta.fields if not field.blank]

def save(

self, *args, **kwargs)

def save(self, *args, **kwargs):
    # Thin override: delegate entirely to MediaRelated.save(),
    # kept as an explicit hook for future per-model behaviour.
    super(MediaRelated, self).save(*args, **kwargs)

def save_base(

self, raw=False, force_insert=False, force_update=False, using=None, update_fields=None)

Handles the parts of saving which should be done only once per save, yet need to be done in raw saves, too. This includes some sanity checks and signal sending.

The 'raw' argument is telling save_base not to save any parent models and not to do any changes to the values before save. This is used by fixture loading.

def save_base(self, raw=False, force_insert=False,
              force_update=False, using=None, update_fields=None):
    """
    Handles the parts of saving which should be done only once per save,
    yet need to be done in raw saves, too. This includes some sanity
    checks and signal sending.
    The 'raw' argument is telling save_base not to save any parent
    models and not to do any changes to the values before save. This
    is used by fixture loading.
    """
    using = using or router.db_for_write(self.__class__, instance=self)
    # force_insert is incompatible with force_update/update_fields, and
    # an empty (non-None) update_fields would be a silent no-op.
    assert not (force_insert and (force_update or update_fields))
    assert update_fields is None or len(update_fields) > 0
    cls = origin = self.__class__
    # Skip proxies, but keep the origin as the proxy model.
    if cls._meta.proxy:
        cls = cls._meta.concrete_model
    meta = cls._meta
    if not meta.auto_created:
        signals.pre_save.send(sender=origin, instance=self, raw=raw, using=using,
                              update_fields=update_fields)
    # NOTE(review): Django 1.6-era transaction API; later versions use
    # transaction.atomic() instead.
    with transaction.commit_on_success_unless_managed(using=using, savepoint=False):
        if not raw:
            self._save_parents(cls, using, update_fields)
        updated = self._save_table(raw, cls, force_insert, force_update, using, update_fields)
    # Store the database on which the object was saved
    self._state.db = using
    # Once saved, this is no longer a to-be-added instance.
    self._state.adding = False
    # Signal that the save is complete
    if not meta.auto_created:
        signals.post_save.send(sender=origin, instance=self, created=(not updated),
                               update_fields=update_fields, raw=raw, using=using)

def save_dirty_fields(

self)

def save_dirty_fields(self):
    "Persist only the fields currently reported as dirty."
    changed = self.get_dirty_fields(check_relationship=True)
    save_specific_fields(self, changed)

def serializable_value(

self, field_name)

Returns the value of the field name for this instance. If the field is a foreign key, returns the id value, instead of the object. If there's no Field object with this name on the model, the model attribute's value is returned directly.

Used to serialize a field's value (in the serializer, or form output, for example). Normally, you would just access the attribute directly and not use this method.

def serializable_value(self, field_name):
    """
    Return the serializable value of ``field_name`` for this instance.

    For a foreign key this is the id value rather than the related
    object; if no Field object with this name exists on the model, the
    plain attribute value is returned directly.  Normally you would
    access the attribute directly instead of using this method.
    """
    try:
        model_field = self._meta.get_field_by_name(field_name)[0]
    except FieldDoesNotExist:
        # Not a model field -- fall back to the raw attribute.
        return getattr(self, field_name)
    else:
        return getattr(self, model_field.attname)

def set_mime_type(

self)

def set_mime_type(self):
    "Guess and store the MIME type from the attached file's path."
    if not self.file:
        return
    self.mime_type = mimetypes.guess_type(self.file.path)[0]

def set_revision(

self, user)

Save a media object and add a revision

def set_revision(self, user):
    "Save a media object and add a revision"
    # Delegates all bookkeeping to Revision.touch(); presumably this
    # records a new revision attributed to `user` -- see the Revision
    # model for the exact semantics.
    Revision.touch(self, user)

def to_dict(

self)

Return model fields as a dict of name/value pairs

def to_dict(self):
    "Return model fields as a dict of name/value pairs"
    return dict((field.name, getattr(self, field.name))
                for field in self._meta.fields)

def to_dom(

self)

Return the DOM representation of this media object

def to_dom(self):
    "Return the DOM representation of this media object"
    # NOTE: Python 2 only -- relies on dict.iteritems() and unicode().
    impl = getDOMImplementation()
    # Root element name is derived from the class name (see get_dom_name).
    root = self.get_dom_name()
    doc = impl.createDocument(None, root, None)
    top = doc.documentElement
    top.setAttribute("id", str(self.pk))
    fields = self.to_dict()
    for name, value in fields.iteritems():
        element = doc.createElement(self.get_dom_field_name(name))
        if isinstance(value, EnhancedModel):
            # Related model instances are referenced by primary key.
            element.setAttribute('key', str(value.pk))
        value = unicode(value)
        element.appendChild(doc.createTextNode(value))
        top.appendChild(element)
    return doc

def to_list(

self)

Return model fields as a list

def to_list(self):
    "Return model fields as a list"
    return [{'name': field.name,
             'value': unicode(getattr(self, field.name))}
            for field in self._meta.fields]

def unique_error_message(

self, model_class, unique_check)

def unique_error_message(self, model_class, unique_check):
    """Build the user-facing message for a unique-constraint violation.

    ``unique_check`` is a tuple of field names: one name for a plain
    ``unique=True`` field, several names for ``unique_together``.
    """
    opts = model_class._meta
    model_name = capfirst(opts.verbose_name)
    # A unique field
    if len(unique_check) == 1:
        field_name = unique_check[0]
        field = opts.get_field(field_name)
        field_label = capfirst(field.verbose_name)
        # Insert the error into the error dict, very sneaky
        return field.error_messages['unique'] % {
            'model_name': six.text_type(model_name),
            'field_label': six.text_type(field_label)
        }
    # unique_together
    else:
        field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check]
        field_labels = get_text_list(field_labels, _('and'))
        return _("%(model_name)s with this %(field_label)s already exists.") % {
            'model_name': six.text_type(model_name),
            'field_label': six.text_type(field_labels)
        }

def validate_unique(

self, exclude=None)

Checks unique constraints on the model and raises ValidationError if any failed.

def validate_unique(self, exclude=None):
    """
    Check every unique constraint on the model and raise a
    ``ValidationError`` if any of them failed.
    """
    unique_checks, date_checks = self._get_unique_checks(exclude=exclude)
    errors = self._perform_unique_checks(unique_checks)
    for key, messages in self._perform_date_checks(date_checks).items():
        errors.setdefault(key, []).extend(messages)
    if errors:
        raise ValidationError(errors)