Creating PR for update #1

Open
wants to merge 4 commits into base: main
51 changes: 51 additions & 0 deletions fifth_day_.py
@@ -0,0 +1,51 @@
# Parse the input file content
def parse_input_file(file_path):
    with open(file_path, 'r') as file:
        content = file.read()

    # Split the content into rules and updates sections
    sections = content.strip().split("\n\n")
    rules = sections[0].split("\n")
    updates = sections[1].split("\n")

    # Parse rules into a list of tuples
    ordering_rules = []
    for rule in rules:
        x, y = map(int, rule.split("|"))
        ordering_rules.append((x, y))

    # Parse updates into lists of integers
    update_lists = [list(map(int, update.split(","))) for update in updates]

    return ordering_rules, update_lists

# Function to calculate the sum of middle page numbers for valid updates
def calculate_middle_sum(ordering_rules, update_lists):
    # Check if an update respects all applicable rules
    def is_valid_update(update, rules):
        for x, y in rules:
            if x in update and y in update:
                if update.index(x) > update.index(y):
                    return False
        return True

    # Validate updates
    valid_updates = [update for update in update_lists if is_valid_update(update, ordering_rules)]

    # Calculate the middle page number sum
    middle_sum = 0
    for update in valid_updates:
        middle_index = len(update) // 2
        middle_sum += update[middle_index]

    return middle_sum

# File path to the input
file_path = 'fifth.txt'  # Replace with your actual file path

# Parse the input file
ordering_rules, update_lists = parse_input_file(file_path)

# Calculate and print the result
result = calculate_middle_sum(ordering_rules, update_lists)
print(f"Sum of middle page numbers from valid updates: {result}")
82 changes: 82 additions & 0 deletions fifth_day_part_2.py
@@ -0,0 +1,82 @@
from collections import defaultdict, deque

def parse_input_file(file_path):
    with open(file_path, 'r') as file:
        content = file.read()

    # Split the content into rules and updates sections
    sections = content.strip().split("\n\n")
    rules = sections[0].split("\n")
    updates = sections[1].split("\n")

    # Parse rules into a list of tuples
    ordering_rules = []
    for rule in rules:
        x, y = map(int, rule.split("|"))
        ordering_rules.append((x, y))

    # Parse updates into lists of integers
    update_lists = [list(map(int, update.split(","))) for update in updates]

    return ordering_rules, update_lists

# Reorder an update using topological sorting
def reorder_update(update, rules):
    # Build a graph based on the rules
    graph = defaultdict(list)
    in_degree = defaultdict(int)

    # Restrict rules to pages in the update
    update_set = set(update)
    for x, y in rules:
        if x in update_set and y in update_set:
            graph[x].append(y)
            in_degree[y] += 1
            in_degree.setdefault(x, 0)

    # Perform topological sorting
    queue = deque([node for node in update if in_degree[node] == 0])
    sorted_update = []

    while queue:
        node = queue.popleft()
        sorted_update.append(node)
        for neighbor in graph[node]:
            in_degree[neighbor] -= 1
            if in_degree[neighbor] == 0:
                queue.append(neighbor)

    return sorted_update

def calculate_middle_sum_for_reordered_updates(ordering_rules, update_lists):
    # Check if an update respects all applicable rules
    def is_valid_update(update, rules):
        for x, y in rules:
            if x in update and y in update:
                if update.index(x) > update.index(y):
                    return False
        return True

    incorrectly_ordered = []
    for update in update_lists:
        if not is_valid_update(update, ordering_rules):
            incorrectly_ordered.append(update)

    # Reorder incorrectly ordered updates and calculate the middle sum
    middle_sum = 0
    for update in incorrectly_ordered:
        reordered = reorder_update(update, ordering_rules)
        middle_index = len(reordered) // 2
        middle_sum += reordered[middle_index]

    return middle_sum

# File path to the input
file_path = 'fifth.txt'  # Replace with your actual file path

# Parse the input file
ordering_rules, update_lists = parse_input_file(file_path)

# Calculate the sum of middle page numbers for reordered updates
result = calculate_middle_sum_for_reordered_updates(ordering_rules, update_lists)
print(f"Sum of middle page numbers from reordered updates: {result}")
31 changes: 31 additions & 0 deletions first_part_1.py
@@ -0,0 +1,31 @@
def total_distance(left_list, right_list):
    # Sort both lists so the smallest entries pair together
    left_list.sort()
    right_list.sort()

    # Calculate the total distance
    distance = sum(abs(l - r) for l, r in zip(left_list, right_list))
    return distance

# Read input file
file_path = "1st.txt"  # Replace with your file path if different

# Initialize lists
left_list = []
right_list = []

# Parse the input file
with open(file_path, "r") as file:
    for line in file:
        numbers = list(map(int, line.split()))
        if len(numbers) == 2:  # Ensure there are exactly two numbers per line
            left_list.append(numbers[0])
            right_list.append(numbers[1])

# Ensure both lists have the same length
if len(left_list) != len(right_list):
    print("Error: The two lists have different lengths.")
else:
    # Calculate the total distance
    result = total_distance(left_list, right_list)
    print(f"Total Distance: {result}")
35 changes: 35 additions & 0 deletions first_part_2.py
@@ -0,0 +1,35 @@
from collections import Counter

def calculate_similarity_score(left_list, right_list):
    # Count occurrences of each number in the right list
    right_count = Counter(right_list)

    # Calculate the similarity score
    similarity_score = 0
    for number in left_list:
        if number in right_count:
            similarity_score += number * right_count[number]
    return similarity_score

# Read input file
file_path = "1st.txt"  # Replace with your file path if different

# Initialize lists
left_list = []
right_list = []

# Parse the input file
with open(file_path, "r") as file:
    for line in file:
        numbers = list(map(int, line.split()))
        if len(numbers) == 2:  # Ensure there are exactly two numbers per line
            left_list.append(numbers[0])
            right_list.append(numbers[1])

# Ensure both lists have the same length
if len(left_list) != len(right_list):
    print("Error: The two lists have different lengths.")
else:
    # Calculate the similarity score
    similarity_score = calculate_similarity_score(left_list, right_list)
    print(f"Similarity Score: {similarity_score}")
45 changes: 45 additions & 0 deletions fouth_day_part_1.py
@@ -0,0 +1,45 @@
def count_word_occurrences(grid, word):
    rows = len(grid)
    cols = len(grid[0])
    word_length = len(word)
    count = 0

    # Helper function to check if the word matches in a given direction
    def match_at_direction(x, y, dx, dy):
        for i in range(word_length):
            nx, ny = x + i * dx, y + i * dy
            if not (0 <= nx < rows and 0 <= ny < cols) or grid[nx][ny] != word[i]:
                return False
        return True

    # All 8 possible directions (built once, outside the cell loop)
    directions = [
        (0, 1),    # Horizontal right
        (0, -1),   # Horizontal left
        (1, 0),    # Vertical down
        (-1, 0),   # Vertical up
        (1, 1),    # Diagonal top-left to bottom-right
        (-1, -1),  # Diagonal bottom-right to top-left
        (1, -1),   # Diagonal top-right to bottom-left
        (-1, 1),   # Diagonal bottom-left to top-right
    ]

    # Iterate over every cell in the grid and test each direction
    for x in range(rows):
        for y in range(cols):
            for dx, dy in directions:
                if match_at_direction(x, y, dx, dy):
                    count += 1

    return count

# Read input file
file_path = "fourth.txt"  # Replace with your file path if different

# Parse the grid from the file
with open(file_path, "r") as file:
    grid = [list(line.strip()) for line in file]

# Count occurrences of "XMAS"
word = "XMAS"
occurrences = count_word_occurrences(grid, word)
print(f"Total occurrences of '{word}': {occurrences}")
30 changes: 30 additions & 0 deletions second_part_1.py
@@ -0,0 +1,30 @@
def is_safe(report):
    # Check if all differences are between 1 and 3
    differences = [report[i+1] - report[i] for i in range(len(report) - 1)]
    if not all(1 <= abs(diff) <= 3 for diff in differences):
        return False

    # Check if the report is strictly increasing or decreasing
    is_increasing = all(diff > 0 for diff in differences)
    is_decreasing = all(diff < 0 for diff in differences)

    return is_increasing or is_decreasing

def count_safe_reports(file_path):
    safe_count = 0

    # Read and process the file
    with open(file_path, "r") as file:
        for line in file:
            report = list(map(int, line.split()))
            if is_safe(report):
                safe_count += 1

    return safe_count

# Input file path
file_path = "second.txt"  # Replace with the path to your file

# Count and print the number of safe reports
safe_reports = count_safe_reports(file_path)
print(f"Number of Safe Reports: {safe_reports}")
44 changes: 44 additions & 0 deletions second_part_2.py
@@ -0,0 +1,44 @@
def is_safe(report):
    # Check if all differences are between 1 and 3
    differences = [report[i+1] - report[i] for i in range(len(report) - 1)]
    if not all(1 <= abs(diff) <= 3 for diff in differences):
        return False

    # Check if the report is strictly increasing or decreasing
    is_increasing = all(diff > 0 for diff in differences)
    is_decreasing = all(diff < 0 for diff in differences)

    return is_increasing or is_decreasing

def is_safe_with_dampener(report):
    if is_safe(report):
        return True

    # Check every variant of the report with one level removed
    for i in range(len(report)):
        modified_report = report[:i] + report[i+1:]  # Remove the ith level
        if is_safe(modified_report):
            return True

    return False

def count_safe_reports_with_dampener(file_path):
    safe_count = 0

    # Read and process the file
    with open(file_path, "r") as file:
        for line in file:
            report = list(map(int, line.split()))
            if is_safe_with_dampener(report):
                safe_count += 1

    return safe_count

# Input file path
file_path = "second.txt"  # Replace with your file path if different

# Count and print the number of safe reports
safe_reports = count_safe_reports_with_dampener(file_path)
print(f"Number of Safe Reports (with Dampener): {safe_reports}")
53 changes: 53 additions & 0 deletions tenth_day_part_1.py
@@ -0,0 +1,53 @@
from collections import deque

def parse_map(file_path):
    """Read the map from a file and parse it into a 2D grid of integers."""
    with open(file_path, 'r') as file:
        return [list(map(int, line.strip())) for line in file]

def bfs(grid, start):
    """Perform BFS to find reachable height-9 positions."""
    rows, cols = len(grid), len(grid[0])
    directions = [(0, 1), (1, 0), (0, -1), (-1, 0)]
    queue = deque([start])
    visited = set()
    reachable_nines = set()

    while queue:
        x, y = queue.popleft()
        if (x, y) in visited:
            continue
        visited.add((x, y))

        # If height is 9, add to reachable nines
        if grid[x][y] == 9:
            reachable_nines.add((x, y))
            continue

        # Explore neighbors
        for dx, dy in directions:
            nx, ny = x + dx, y + dy
            if 0 <= nx < rows and 0 <= ny < cols and (nx, ny) not in visited:
                if grid[nx][ny] == grid[x][y] + 1:  # Valid hiking trail step
                    queue.append((nx, ny))

    return reachable_nines

def compute_trailhead_scores(grid):
    """Compute the score for all trailheads in the map."""
    rows, cols = len(grid), len(grid[0])
    total_score = 0

    for x in range(rows):
        for y in range(cols):
            if grid[x][y] == 0:  # Trailhead found
                reachable_nines = bfs(grid, (x, y))
                total_score += len(reachable_nines)

    return total_score

# Main execution
file_path = "tenth.txt"
grid = parse_map(file_path)
result = compute_trailhead_scores(grid)
print("Sum of scores of all trailheads:", result)