1
  2
  3
  4
  5
  6
  7
  8
  9
 10
 11
 12
 13
 14
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
#!/usr/bin/env python
"""
Blocking Google Play and iTunes movies tables.

The output of blocking is written to tableC.csv. The blocking is done by
matching the release year of movies in both the tables. If the movie has no
year set, it is matched with all movies in the other table. In addition, we
also further refine the matches based on length of the movie name. More
specifically, we match a movie in one table (iTunes) with movies in the other
table (Google) whose length of the movie name is within a range of the matched
movie (configurable). We use SQLite3 to create an index of tuples based on year
and length of the name to query matches efficiently.

Usage: blocking.py [options] <google.json[.gz]> <itunes.json[.gz]>

Options:
  -h, --help            show this help message and exit
  -d, --dryrun          Dry run.
  -n RANGE, --name-length-range=RANGE
                        Range of the name lengths to match against.
  -l LOGLEVEL, --log=LOGLEVEL
                        Logging level: DEBUG|INFO|WARNING|ERROR|CRITICAL
"""

import gzip
import json
import logging
import optparse
import re
import sqlite3
import sys
import unidecode
from collections import defaultdict

# Attributes projected out of each table's tuples. The first attribute must
# be `id' -- write_header() and output() slice on that assumption.
GOOGLE_PROJECTION = [ 'id', 'name', 'year', 'actors', 'directors', ]
ITUNES_PROJECTION = [ 'id', 'name', 'year', 'actors', 'directors', ]
# Width of the name-length match window; overwritten from the
# --name-length-range option in main().
NAME_LENGTH_RANGE = 10
# SQLite3 connection to the in-memory DB, initialized by create_index().
DB_CONN = None
DB_NAME = ':memory:'
# Set from the --dryrun option in main(); when True, matches are only counted.
DRY_RUN = False
# Inverted index: name token -> set of Google movie ids containing it
# (populated by create_index()).
TOKEN_INDEX = defaultdict(set)
# Distinct release years present in the Google table (filled by create_index).
ALL_YEARS = []

def read_file(path):
    """Open the file at `path' for reading, transparently handling gzip.

    Args:
        path: Path of the file to open. A file whose name ends with the
            .gz extension is treated as gzip-compressed.

    Returns:
        A file-like object supporting read(), readlines(), etc.
    """
    # Pick the opener and mode together, then open in one place.
    open_fn, mode = ((gzip.open, 'rb') if path.endswith('.gz')
                     else (open, 'r'))
    return open_fn(path, mode)

def read_json(path):
    """Read and decode the JSON file at `path'.

    Args:
        path: Path to the JSON file to read; may be gzip-compressed (.gz).

    Returns:
        Object encoded in the JSON file.
    """
    fp = read_file(path)
    try:
        return json.load(fp)
    finally:
        # Close the handle even when decoding raises, so the descriptor
        # is not leaked on malformed input.
        fp.close()

def project_item_list(item, selection, NULL=''):
    """Project item dictionary into a list.

    Project attributes in `selection' list. We substitute NULL when an
    attribute is absent.

    Args:
        item: Item dictionary to project, where key-value pairs correspond
            to attribute and value.
        selection: List of attributes to project.
        NULL: NULL entry to substitute, when attribute is absent in item.
            Defaults to '' (empty string).

    Returns:
        List of projected values of the item tuple.

    >>> project_item_list({'a': 0, 'b': 1, 'c': 2}, ['a', 'b'])
    [0, 1]
    >>> project_item_list({'a': 0, 'b': 1, 'c': 2}, ['a', 'd'])
    [0, '']
    """
    # dict.get works on both Python 2 and 3; dict.has_key was removed in 3.
    return [item.get(attr, NULL) for attr in selection]

def strip_brackets(title):
    """Strip one trailing parenthesized qualifier (and whitespace) from title.

    >>> strip_brackets('Scarface ')
    'Scarface'
    >>> strip_brackets(' Scarface (1983) ')
    'Scarface'
    >>> strip_brackets(" Scarface (Director's Cut) ")
    'Scarface'
    >>> strip_brackets(' Scarface (Special Edition) ')
    'Scarface'
    >>> strip_brackets(' Scarface (Dubbed) ')
    'Scarface'
    >>> strip_brackets('A (b) c')
    'A (b) c'
    """
    title = title.strip()
    # Anchor the bracketed group to the end of the string. The previous
    # unanchored pattern sliced the wrong suffix off titles that contain
    # a parenthesized group in the middle (e.g. 'A (b) c' -> 'A (b').
    return re.sub(r'\s*\([^()]*\)$', '', title).strip()

STOP_WORDS = set([ 'the', 'a', 'an',
                   'be', 'is', 'am', 'are', 'was', 'were',
                   'of', 'on', 'to', 'in', 'by', 'and', 'as', 'at', 'for',
                   'if', 'it', 's', 'or' ])
def tokenize(string):
    """Tokenize the string into lowercase words, normalizing unicode chars to
    unaccented ASCII chars and dropping stop words.

    >>> tokenize(u'Star Wars: Episode VI - The Return of the Jedi')
    ['star', 'wars', 'episode', 'vi', 'return', 'jedi']
    >>> tokenize(u'Dr. Strangelove or: How I Learned to Stop Worrying and Love the Bomb')
    ['dr', 'strangelove', 'how', 'i', 'learned', 'stop', 'worrying', 'love', 'bomb']
    >>> tokenize(u"It's a Wonderful Life")
    ['wonderful', 'life']
    >>> tokenize(u'Am\xe9lie')
    ['amelie']
    """
    # A list comprehension keeps the return type a list on both Python 2
    # and 3 (filter() returns a lazy iterator on Python 3, which would
    # break callers indexing or len()-ing the result).
    words = re.split(r'\W', unidecode.unidecode(string).lower())
    return [w for w in words if w and w not in STOP_WORDS]

def create_index(A):
    """Creates an index of Google Play movies table in an in-memory SQLite3 DB.

    Also populates TOKEN_INDEX (token -> set of Google movie ids) and
    ALL_YEARS (distinct years seen in the table).

    Args:
        A: Google Play table dictionary with a 'tuples' list of movie dicts.

    Raises:
        ValueError: If a Google tuple is missing its 'year' attribute.
    """
    logging.debug('Creating index on table A.')
    global DB_CONN, TOKEN_INDEX
    DB_CONN = sqlite3.connect(DB_NAME)
    cur = DB_CONN.cursor()

    # Create table and index in SQLite3.
    cur.execute("""CREATE TABLE google (id TEXT, name TEXT, year INT, actors TEXT,
                                        directors TEXT, name_length INT);""")
    cur.execute("""CREATE INDEX google_index ON google(year, name_length);""")

    # Insert tuples to SQLite3.
    for item in A['tuples']:
        # 'in' replaces has_key(), which was removed in Python 3.
        if 'year' not in item:
            raise ValueError('Empty year')
        values = project_item_list(item, GOOGLE_PROJECTION, None)
        values[2] = int(values[2]) # Year
        stripped_name = strip_brackets(values[1])
        values.append(len(stripped_name)) # Name length
        cur.execute('INSERT INTO google VALUES (?, ?, ?, ?, ?, ?)', values)
        # Build the inverted token index used for the token-overlap filter.
        tokens = tokenize(stripped_name)
        assert len(tokens) > 0
        for token in tokens:
            TOKEN_INDEX[token].add(values[0])
    DB_CONN.commit()

    # Find all years present in the Google Play table.
    global ALL_YEARS
    for row in cur.execute('SELECT year FROM google GROUP BY year;'):
        ALL_YEARS.append(row[0])
    cur.close()

def match_items(cur, year, length, itemB, intersection_set):
    """Match entries in Google Play with same year and name length between
    length +/- NAME_LENGTH_RANGE/2 range.

    Args:
        cur: SQLite3 cursor over the google table.
        year: Release year to match exactly.
        length: Stripped name length of the iTunes movie.
        itemB: Projected iTunes tuple to pair with each match.
        intersection_set: Ids of Google movies sharing at least one token.

    Yields:
        (google_tuple, itemB) pairs; in dry-run mode, 1 per match instead.
    """
    # Floor division keeps the window integral on Python 3 as well, where
    # a bare / would produce a float. Hoisted out of the query tuple.
    half_range = NAME_LENGTH_RANGE // 2
    for item in cur.execute("""SELECT * FROM google WHERE
            year = ? AND name_length >= ? AND name_length <= ?""",
            (year, length - half_range, length + half_range)):
        if item[0] in intersection_set:
            yield (item[:len(GOOGLE_PROJECTION)], itemB) if not DRY_RUN else 1

def match_items_allyears(cur, length, itemB, intersection_set):
    """Match entries in Google Play across all years with name length between
    length +/- NAME_LENGTH_RANGE/2 range.
    """
    # ALL_YEARS is populated by create_index(); probe each year in turn.
    # (No `global` statement needed for a read-only reference.)
    for candidate_year in ALL_YEARS:
        for pair in match_items(cur, candidate_year, length, itemB,
                intersection_set):
            yield pair

def at_least_one_token(title):
    """Set of movie ids with at least one token in common.

    Args:
        title: Movie name (brackets already stripped by the caller).

    Returns:
        Set of Google movie ids whose names share at least one token.
    """
    # Tokenize once -- the previous version computed the token list and
    # then discarded it, tokenizing the title a second time in the loop.
    # Titles made entirely of stop words (e.g. 'The the the', 'Of By For')
    # legitimately yield no tokens and thus an empty result set.
    at_least_one = set()
    for token in tokenize(title):
        at_least_one.update(TOKEN_INDEX[token])
    return at_least_one

def probe(B):
    """Probe tuples from the iTunes table into the Google tuples index to
    find suitable matches.

    Args:
        B: iTunes table dictionary with a 'tuples' list of movie dicts.

    Yields:
        Candidate (google_tuple, itunes_tuple) pairs (or 1 in dry-run mode).
    """
    logging.debug('Probing index using table B.')
    cur = DB_CONN.cursor()
    for item in B['tuples']:
        projected = project_item_list(item, ITUNES_PROJECTION)
        stripped_name = strip_brackets(item['name'])
        at_least_set = at_least_one_token(stripped_name)
        length = len(stripped_name)
        # dict.get replaces has_key(), which was removed in Python 3;
        # a missing key and an empty-string year both take the all-years path.
        if item.get('year', '') != '':
            # Year is well-defined, so match with specific year.
            for match in match_items(cur, item['year'],
                    length, projected, at_least_set):
                yield match
        else:
            # Year is not defined, so match with *all* years.
            for match in match_items_allyears(cur, length, projected,
                    at_least_set):
                yield match

def csvquote(value):
    """CSV quote row value. Quote values with comma and quotes. Escape quotes in
    the row value with two quotes.

    >>> csvquote('abc')
    u'abc'
    >>> csvquote('a, b, c')
    u'"a, b, c"'
    >>> csvquote('a "b" c')
    u'"a ""b"" c"'
    >>> csvquote('a\\nb')  # Escape for doctest
    u'"a\\nb"'
    >>> csvquote('a, \\n"b", c')  # Escape for doctest
    u'"a, \\n""b"", c"'
    >>> csvquote(1)
    u'1'
    """
    # Stringify first; carriage returns are flattened to spaces.
    text = u'{0}'.format(value).replace('\r', ' ')
    # A value needs quoting if it contains a comma, newline, or quote.
    needs_quoting = u',' in text or u'\n' in text
    if u'"' in text:
        # Embedded quotes are doubled per the CSV convention.
        text = text.replace('"', '""')
        needs_quoting = True
    return u'"' + text + u'"' if needs_quoting else text

def write_header(C, A, B):
    """Write the header of the CSV output format.

    Args:
        C: Writable binary file object (lines are encoded to UTF-8 bytes).
        A: Google table dictionary providing attribute names and types.
        B: iTunes table dictionary providing attribute names and types.
    """
    def attr_type(table, attr):
        # next() on a generator works on Python 2 and 3; the previous
        # filter(...)[0] breaks on Python 3, where filter() is lazy.
        return next(item for item in table['table']['attributes']
                    if item['name'] == attr)['type']

    attributes = [('pairId', 'INTEGER')]
    attributesA = [(A['table']['name'] + '.' + attr, attr_type(A, attr))
                   for attr in GOOGLE_PROJECTION]
    attributesB = [(B['table']['name'] + '.' + attr, attr_type(B, attr))
                   for attr in ITUNES_PROJECTION]
    # Assuming first attribute is `id'.
    attributes += attributesA[:1] + attributesB[:1]
    attributes += attributesA[1:] + attributesB[1:]
    line = u','.join(u'{0}:{1}'.format(name, type_)
                     for name, type_ in attributes) + u'\n'
    C.write(line.encode('utf-8'))

def output(matches, A, B):
    """Write the output of the blocking step to tableC.csv.

    Args:
        matches: Iterable of (google_tuple, itunes_tuple) candidate pairs.
        A: Google table dictionary (for the header).
        B: iTunes table dictionary (for the header).
    """
    logging.debug('Writing output after blocking.')
    written = 0
    # Open in binary mode: every row is encoded to UTF-8 bytes before
    # writing, which a text-mode file would reject on Python 3.
    with open('tableC.csv', 'wb') as C:
        write_header(C, A, B)
        pair_id = 0
        for itemA, itemB in matches:
            itemA = list(itemA)
            written += 1
            attributes = [pair_id]
            # Assuming first attribute is `id'.
            attributes += itemA[:1] + itemB[:1]
            attributes += itemA[1:] + itemB[1:]
            line = u','.join(map(csvquote, attributes)) + u'\n'
            C.write(line.encode('utf-8'))
            pair_id += 1
        logging.debug('Matches: {0}'.format(written))

def count(matches):
    """Count the number of matches produced for debugging in dry run mode.

    Args:
        matches: Iterable of 1s yielded by the matcher in dry-run mode.
    """
    total = sum(matches)
    logging.debug('Matches: {0}'.format(total))

def block(google, itunes):
    """Block the google table with the itunes table.

    Builds the index over the Google table, probes it with iTunes tuples,
    and either counts the matches (dry run) or writes them to tableC.csv.
    """
    create_index(google)
    matches = probe(itunes)
    if DRY_RUN:
        count(matches)
        return
    output(matches, google, itunes)

def main():
    """Parse command-line options and run the blocking step.
    """
    usage = '%prog [options] <google.json[.gz]> <itunes.json[.gz]>'
    opt_parser = optparse.OptionParser(usage=usage)
    opt_parser.add_option('-d', '--dryrun', dest='dryrun', action="store_true",
            default=False, help='Dry run.')
    opt_parser.add_option('-n', '--name-length-range', dest='range',
            type="int", default=16,
            help='Range of the name lengths to match against.')
    opt_parser.add_option('-l', '--log', dest='loglevel', default='DEBUG',
            help='Logging level: DEBUG|INFO|WARNING|ERROR|CRITICAL')
    opts, positional = opt_parser.parse_args()

    # Resolve the symbolic log level name to its numeric constant.
    level = getattr(logging, opts.loglevel.upper(), None)
    if not isinstance(level, int):
        raise ValueError('Invalid log level: %s' % opts.loglevel)
    logging.basicConfig(level=level)

    # Push the parsed options into the module-level configuration globals.
    global DRY_RUN, NAME_LENGTH_RANGE
    if opts.dryrun:
        DRY_RUN = opts.dryrun
        logging.debug('Mode: Dry run.')
    logging.debug('Name length range: {0}'.format(opts.range))
    NAME_LENGTH_RANGE = opts.range

    if len(positional) != 2:
        opt_parser.print_help()
        sys.exit(1)

    block(read_json(positional[0]), read_json(positional[1]))

# Script entry point: run blocking only when executed directly.
if __name__ == '__main__':
    main()