<pre><code>import pandas as pd
import numpy as np
# Initializing the data: one track (FSTR_ID) with per-point epoch-ms
# timestamps (FDT_DATE), coordinates, and a 0/1 status flag (FINT_STAT).
df = pd.DataFrame({'FDT_DATE': {0: 1417390467000, 1: 1417390428000, 2: 1417390608000, 3: 1417390548000,
4: 1417390668000, 5: 1417390717000, 6: 1417390758000, 7: 1417390798000,
8: 1417390818000, 9: 1417390827000, 10: 1417390907000},
'FFLT_LATITUDE': {0: 31.2899, 1: 31.291, 2: 31.2944, 3: 31.294, 4: 31.2954,
5: 31.2965, 6: 31.2946, 7: 31.2932, 8: 31.294, 9: 31.2946,
10: 31.2952},
'FFLT_LONGITUDE': {0: 121.4845, 1: 121.4859, 2: 121.4857, 3: 121.485, 4: 121.4886,
5: 121.4937, 6: 121.494, 7: 121.496, 8: 121.4966, 9: 121.4974,
10: 121.4986},
'FINT_STAT': {0: 0, 1: 0, 2: 1, 3: 1, 4: 1, 5: 1, 6: 0, 7: 1, 8: 1, 9: 1,
10: 0},
'FSTR_ID': {0: 112609, 1: 112609, 2: 112609, 3: 112609, 4: 112609, 5: 112609,
6: 112609, 7: 112609, 8: 112609, 9: 112609, 10: 112609}})
# Transforming sequences of records with FINT_STAT == 1 to unique GROUP_ID
# values: the counter increments on every FINT_STAT == 0 row, so each
# consecutive run of ones shares one id.
df['GROUP_ID'] = df['FINT_STAT'].apply(np.logical_not).cumsum()
# Marking groups with FINT_STAT == 0 for removing (their GROUP_ID becomes 0)
df['GROUP_ID'] *= df['FINT_STAT']
# Removing marked groups: groupby drops NaN keys by default.
# NOTE: np.NaN was removed in NumPy 2.0; the canonical spelling is np.nan.
df['GROUP_ID'] = df['GROUP_ID'].replace(0, np.nan)
# Grouping by columns GROUP_ID and FSTR_ID
gb = df.groupby(['GROUP_ID', 'FSTR_ID'])
result = pd.DataFrame()
# Appending column with the minimal FDT_DATE of every group
result['MIN_FDT_DATE'] = gb['FDT_DATE'].min()
# Aggregating each group into a list of (FFLT_LATITUDE, FFLT_LONGITUDE) pairs.
# Selecting the two coordinate columns before apply() avoids the pandas
# deprecation about operating on the grouping columns, and zip() replaces
# the slow per-row iterrows() loop with an equivalent vectorized pairing.
result['COORDINATES'] = gb[['FFLT_LATITUDE', 'FFLT_LONGITUDE']].apply(
    lambda g: list(zip(g['FFLT_LATITUDE'], g['FFLT_LONGITUDE'])))
# Widening line and max column width for printing.
# NOTE: the old 'display.line_width' option was removed from pandas;
# 'display.width' is its replacement.
pd.set_option('display.width', 300)
pd.set_option('display.max_colwidth', 200)
# Looking at result
print(result)
</code></pre>
<p>输出:</p>
<pre><code>                   MIN_FDT_DATE                                                                          COORDINATES
GROUP_ID FSTR_ID
2.0      112609   1417390548000  [(31.2944, 121.4857), (31.294, 121.485), (31.2954, 121.4886), (31.2965, 121.4937)]
3.0      112609   1417390798000                       [(31.2932, 121.496), (31.294, 121.4966), (31.2946, 121.4974)]
</code></pre>