64 lines
1.3 KiB
Python
64 lines
1.3 KiB
Python
![]() |
#!/usr/bin/env python
|
||
|
|
||
|
import re
|
||
|
from collections import defaultdict
|
||
|
|
||
|
# Parses an insertion rule such as "CH -> B" into (pair, inserted char).
# Raw string: "\s" in a plain literal is an invalid escape sequence and
# raises a warning on modern Python.
RE_INS = re.compile(r"(.*)\s+->\s+(.*)$")
|
||
|
|
||
|
def tokenize_polymer(polymer):
    """Count the adjacent character pairs of *polymer*.

    Returns a ``defaultdict(int)`` mapping each two-character window of
    the string to the number of times it occurs.  A string shorter than
    two characters yields an empty mapping.
    """
    counts = defaultdict(int)
    # zip the string with itself shifted by one to walk every window
    for left, right in zip(polymer, polymer[1:]):
        counts[left + right] += 1
    return counts
|
||
|
|
||
|
def iterate_polymer(polymer, rules):
    """Apply one round of pair-insertion rules to a pair-count mapping.

    *polymer* maps two-character pairs to occurrence counts (a
    ``defaultdict(int)``); *rules* maps a pair to the character inserted
    between its ends.  The mapping is updated in place and also returned.
    """
    changes = defaultdict(int)
    # Snapshot the current entries so mutation below cannot interfere.
    for pair, count in list(polymer.items()):
        inserted = rules.get(pair)
        if inserted is None or count == 0:
            continue
        # "AB" with rule "AB -> C" splits into "AC" and "CB".
        changes[pair[0] + inserted] += count
        changes[inserted + pair[1]] += count
        changes[pair] -= count
    for pair, diff in changes.items():
        polymer[pair] += diff
    return polymer
|
||
|
|
||
|
# --- Day 14: Extended Polymerization, part 2 ---
# The polymer is tracked as pair counts, so 40 iterations stay cheap
# even though the literal string would grow exponentially.

polymer = None
rules = dict()

with open("input01.txt", "r") as f:
    # First line is the polymer template; the rest are insertion rules.
    polymer = f.readline().strip()

    for line in f:
        line = line.strip()
        if len(line) == 0:
            continue
        m = RE_INS.match(line)
        if not m:
            # Malformed rule line: report it and skip it.  (Previously the
            # code printed the line but still called m.group(), crashing
            # with AttributeError on None.)
            print(line)
            continue
        rules[m.group(1)] = m.group(2)

# Convert the template into pair counts once.  (A hand-rolled duplicate
# of this loop used to run first and was immediately overwritten here;
# it has been removed.)
polymer_pairs = tokenize_polymer(polymer)

for i in range(40):
    polymer_pairs = iterate_polymer(polymer_pairs, rules)

# Count characters: every character is the second element of exactly one
# pair, except the template's first character, which is added back by hand.
sums = defaultdict(int)
for pair in polymer_pairs.keys():
    sums[pair[1]] += polymer_pairs[pair]
sums[polymer[0]] += 1

p_max = max(sums.values())
p_min = min(sums.values())
print(p_max - p_min)
|