## DataFrame
import pandas as pd # This is the standard way of importing the Pandas library
import numpy as np
We read the weather measurements into a DataFrame with the read_csv function:
wh = pd.read_csv("kumpula-weather-2017.csv")
wh.head() # The head method returns the first 5 rows
| | Year | m | d | Time | Time zone | Precipitation amount (mm) | Snow depth (cm) | Air temperature (degC) |
---|---|---|---|---|---|---|---|---|
0 | 2017 | 1 | 1 | 00:00 | UTC | -1.0 | -1.0 | 0.6 |
1 | 2017 | 1 | 2 | 00:00 | UTC | 4.4 | -1.0 | -3.9 |
2 | 2017 | 1 | 3 | 00:00 | UTC | 6.6 | 7.0 | -6.5 |
3 | 2017 | 1 | 4 | 00:00 | UTC | -1.0 | 13.0 | -12.8 |
4 | 2017 | 1 | 5 | 00:00 | UTC | -1.0 | 10.0 | -17.8 |
wh["Snow depth (cm)"].head()
0    -1.0
1    -1.0
2     7.0
3    13.0
4    10.0
Name: Snow depth (cm), dtype: float64
wh["Air temperature (degC)"].mean() # Mean temperature
6.527123287671233
wh.drop("Time zone", axis=1).head() # Return a copy with one column removed, the original DataFrame stays intact
| | Year | m | d | Time | Precipitation amount (mm) | Snow depth (cm) | Air temperature (degC) |
---|---|---|---|---|---|---|---|
0 | 2017 | 1 | 1 | 00:00 | -1.0 | -1.0 | 0.6 |
1 | 2017 | 1 | 2 | 00:00 | 4.4 | -1.0 | -3.9 |
2 | 2017 | 1 | 3 | 00:00 | 6.6 | 7.0 | -6.5 |
3 | 2017 | 1 | 4 | 00:00 | -1.0 | 13.0 | -12.8 |
4 | 2017 | 1 | 5 | 00:00 | -1.0 | 10.0 | -17.8 |
wh.head() # Original DataFrame is unchanged
| | Year | m | d | Time | Time zone | Precipitation amount (mm) | Snow depth (cm) | Air temperature (degC) |
---|---|---|---|---|---|---|---|---|
0 | 2017 | 1 | 1 | 00:00 | UTC | -1.0 | -1.0 | 0.6 |
1 | 2017 | 1 | 2 | 00:00 | UTC | 4.4 | -1.0 | -3.9 |
2 | 2017 | 1 | 3 | 00:00 | UTC | 6.6 | 7.0 | -6.5 |
3 | 2017 | 1 | 4 | 00:00 | UTC | -1.0 | 13.0 | -12.8 |
4 | 2017 | 1 | 5 | 00:00 | UTC | -1.0 | 10.0 | -17.8 |
wh["Rainy"] = wh["Precipitation amount (mm)"] > 5
wh.head()
| | Year | m | d | Time | Time zone | Precipitation amount (mm) | Snow depth (cm) | Air temperature (degC) | Rainy |
---|---|---|---|---|---|---|---|---|---|
0 | 2017 | 1 | 1 | 00:00 | UTC | -1.0 | -1.0 | 0.6 | False |
1 | 2017 | 1 | 2 | 00:00 | UTC | 4.4 | -1.0 | -3.9 | False |
2 | 2017 | 1 | 3 | 00:00 | UTC | 6.6 | 7.0 | -6.5 | True |
3 | 2017 | 1 | 4 | 00:00 | UTC | -1.0 | 13.0 | -12.8 | False |
4 | 2017 | 1 | 5 | 00:00 | UTC | -1.0 | 10.0 | -17.8 | False |
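The new boolean column can be used as a mask to pick out only the rainy days. This is a small sketch of that idea (the resulting rows and counts are not shown in the original material):

wh[wh["Rainy"]].head()   # only the rows where Rainy is True
wh["Rainy"].sum()        # True counts as 1, so this is the number of rainy days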
## Series

A Series is a one-dimensional version of a DataFrame:
s=pd.Series([1, 4, 5, 2, 5, 2])
s
0    1
1    4
2    5
3    2
4    5
5    2
dtype: int64
s1=pd.Series([1, 4, 5, 2, 5, 2], index=list("abcdef"))
s1
a    1
b    4
c    5
d    2
e    5
f    2
dtype: int64
s1.index
Index(['a', 'b', 'c', 'd', 'e', 'f'], dtype='object')
We can also attach a name to this series:
s.name = "Grades"
s
0    1
1    4
2    5
3    2
4    5
5    2
Name: Grades, dtype: int64
The common attributes of the series are the name, dtype, and size:
print(f"Name: {s.name}, dtype: {s.dtype}, size: {s.size}")
Name: Grades, dtype: int64, size: 6
s[1] # Indexing
4
s1["b"]
4
s2=s[[0,5]] # Fancy indexing
print(s2)
0    1
5    2
Name: Grades, dtype: int64
t=s[-2:] # Slicing
t
4    5
5    2
Name: Grades, dtype: int64
t[4] # The slice keeps the original explicit indices, so t[0] would give an error
5
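If positional access into the slice is needed, the iloc attribute (covered in more detail below) can be used instead of the explicit index; a quick sketch for contrast:

t.iloc[0]   # positional indexing: the first element of the slice, here the same value as t[4]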
The DataFrame is essentially a two-dimensional object, and it can be created in three different ways:
df=pd.DataFrame(np.random.randn(2,3), columns=["First", "Second", "Third"], index=["a", "b"])
df
| | First | Second | Third |
---|---|---|---|
a | -1.29709 | 0.030407 | -0.591453 |
b | -1.18542 | 1.482094 | -1.696548 |
df.index # These are the "row names"
Index(['a', 'b'], dtype='object')
df.columns # These are the "column names"
Index(['First', 'Second', 'Third'], dtype='object')
s1 = pd.Series([1,2,3])
s1
0    1
1    2
2    3
dtype: int64
s2 = pd.Series([4,5,6], name="b")
s2
0    4
1    5
2    6
Name: b, dtype: int64
pd.DataFrame(s1, columns=["a"])
| | a |
---|---|
0 | 1 |
1 | 2 |
2 | 3 |
Multiple columns can be combined into a DataFrame by giving a dictionary of Series:
pd.DataFrame({"a": s1, "b": s2})
| | a | b |
---|---|---|
0 | 1 | 4 |
1 | 2 | 5 |
2 | 3 | 6 |
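The Series are aligned on their index when they are combined into a DataFrame, so mismatching indices produce missing values. A small sketch of this, not part of the original example:

s3 = pd.Series([7, 8], index=[1, 2])
pd.DataFrame({"a": s1, "c": s3})   # row 0 has no value for column c, so that cell becomes NaN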
A DataFrame can also be created row by row; each row is given as a dict, list, Series, or NumPy array.
df=pd.DataFrame([{"Wage" : 1000, "Name" : "Jack", "Age" : 21}, {"Wage" : 1500, "Name" : "John", "Age" : 29}])
df
| | Age | Name | Wage |
---|---|---|---|
0 | 21 | Jack | 1000 |
1 | 29 | John | 1500 |
df = pd.DataFrame([[1000, "Jack", 21], [1500, "John", 29]], columns=["Wage", "Name", "Age"])
df
| | Wage | Name | Age |
---|---|---|---|
0 | 1000 | Jack | 21 |
1 | 1500 | John | 29 |
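Rows can likewise be given as NumPy arrays; as a sketch, a purely numeric frame could be built from an array of rows like this (the column names are reused from the example above):

pd.DataFrame(np.array([[1000, 21], [1500, 29]]), columns=["Wage", "Age"])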
df[0]
KeyError: 0
Indexing a DataFrame with df[0] fails because a plain integer is interpreted as a column name. There are two attributes for indexing:

loc: use the explicit indices
iloc: use the implicit integer indices

df.loc[1, "Wage"]
1500
df.iloc[-1,-1] # Right lower corner of the DataFrame
29
df.loc[1, ["Name", "Wage"]]
Name    John
Wage    1500
Name: 1, dtype: object
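A whole row can also be selected by giving only the row index; a small sketch:

df.loc[1]    # the row with explicit index 1, returned as a Series
df.iloc[0]   # the first row by position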
wh.head()
| | Year | m | d | Time | Time zone | Precipitation amount (mm) | Snow depth (cm) | Air temperature (degC) | Rainy |
---|---|---|---|---|---|---|---|---|---|
0 | 2017 | 1 | 1 | 00:00 | UTC | -1.0 | -1.0 | 0.6 | False |
1 | 2017 | 1 | 2 | 00:00 | UTC | 4.4 | -1.0 | -3.9 | False |
2 | 2017 | 1 | 3 | 00:00 | UTC | 6.6 | 7.0 | -6.5 | True |
3 | 2017 | 1 | 4 | 00:00 | UTC | -1.0 | 13.0 | -12.8 | False |
4 | 2017 | 1 | 5 | 00:00 | UTC | -1.0 | 10.0 | -17.8 | False |
wh2 = wh.drop(["Year", "m", "d", "Time", "Time zone"], axis=1) # taking averages over these is not very interesting
wh2.mean()
Precipitation amount (mm)    1.966301
Snow depth (cm)              0.966480
Air temperature (degC)       6.527123
Rainy                        0.158904
dtype: float64
The describe method of the DataFrame object gives different summary statistics for each (numeric) column.

wh.describe()
| | Year | m | d | Precipitation amount (mm) | Snow depth (cm) | Air temperature (degC) |
---|---|---|---|---|---|---|
count | 365.0 | 365.000000 | 365.000000 | 365.000000 | 358.000000 | 365.000000 |
mean | 2017.0 | 6.526027 | 15.720548 | 1.966301 | 0.966480 | 6.527123 |
std | 0.0 | 3.452584 | 8.808321 | 4.858423 | 3.717472 | 7.183934 |
min | 2017.0 | 1.000000 | 1.000000 | -1.000000 | -1.000000 | -17.800000 |
25% | 2017.0 | 4.000000 | 8.000000 | -1.000000 | -1.000000 | 1.200000 |
50% | 2017.0 | 7.000000 | 16.000000 | 0.200000 | -1.000000 | 4.800000 |
75% | 2017.0 | 10.000000 | 23.000000 | 2.700000 | 0.000000 | 12.900000 |
max | 2017.0 | 12.000000 | 31.000000 | 35.000000 | 15.000000 | 19.600000 |
wh["Snow depth (cm)"].unique()
array([-1., 7., 13., 10., 12., 9., 8., 5., 6., 4., 3., 15., 14., 2., nan, 0.])
The nan value tells us that the measurement from that day is not available. For non-numeric types the special value None is used to denote a missing value, and the dtype is promoted to object.
pd.Series(["jack", "joe", None])
0    jack
1     joe
2    None
dtype: object
The missing values can be located with the isnull method:
wh[wh.isnull().any(axis=1)]
| | Year | m | d | Time | Time zone | Precipitation amount (mm) | Snow depth (cm) | Air temperature (degC) | Rainy |
---|---|---|---|---|---|---|---|---|---|
74 | 2017 | 3 | 16 | 00:00 | UTC | 1.8 | NaN | 3.4 | False |
163 | 2017 | 6 | 13 | 00:00 | UTC | 0.6 | NaN | 12.6 | False |
308 | 2017 | 11 | 5 | 00:00 | UTC | 0.2 | NaN | 8.4 | False |
309 | 2017 | 11 | 6 | 00:00 | UTC | 2.0 | NaN | 7.5 | False |
313 | 2017 | 11 | 10 | 00:00 | UTC | 3.6 | NaN | 7.2 | False |
321 | 2017 | 11 | 18 | 00:00 | UTC | 11.3 | NaN | 5.9 | True |
328 | 2017 | 11 | 25 | 00:00 | UTC | 8.5 | NaN | 4.2 | True |
The notnull method is the complement of the isnull method.
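For instance, the rows with an actual snow depth measurement could be selected like this (a sketch, not shown in the original):

wh[wh["Snow depth (cm)"].notnull()].shape   # only the rows where the snow depth is known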
The dropna method of a DataFrame drops rows or columns that contain missing values, depending on the axis parameter.
wh.dropna().shape # Default axis is 0
(358, 9)
wh.dropna(axis=1).shape # Drops the columns containing missing values
(365, 8)
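Instead of dropping rows or columns, missing values can also be replaced with the fillna method. A small sketch; the fill value 0 here is an assumption for illustration, not taken from the original analysis:

wh.fillna({"Snow depth (cm)": 0}).shape   # the shape stays (365, 9); the NaNs in that column become 0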
A column can be converted to another type with the pd.to_numeric function or the map method; the astype method works for both Series and DataFrames.

pd.Series(["1","2"]).map(int) # str -> int
0    1
1    2
dtype: int64
pd.Series([1,2]).map(str) # int -> str
0    1
1    2
dtype: object
pd.to_numeric(pd.Series([1,1.0]), downcast="integer") # object -> int
0    1
1    1
dtype: int8
pd.to_numeric(pd.Series([1,"a"]), errors="coerce") # conversion error produces Nan
0    1.0
1    NaN
dtype: float64
pd.Series([1,2]).astype(str) # works for a single series
0    1
1    2
dtype: object
df = pd.DataFrame({"a": [1,2,3], "b" : [4,5,6], "c" : [7,8,9]})
print(df.dtypes)
print(df)
a    int64
b    int64
c    int64
dtype: object
   a  b  c
0  1  4  7
1  2  5  8
2  3  6  9
df.astype(float) # Convert all columns
| | a | b | c |
---|---|---|---|
0 | 1.0 | 4.0 | 7.0 |
1 | 2.0 | 5.0 | 8.0 |
2 | 3.0 | 6.0 | 9.0 |
df2 = df.astype({"b" : float, "c" : str}) # different types for columns
print(df2.dtypes)
print(df2)
a      int64
b    float64
c     object
dtype: object
   a    b  c
0  1  4.0  7
1  2  5.0  8
2  3  6.0  9
names = pd.Series(["donald", "theresa", "angela", "vladimir"])
names.str.capitalize()
0      Donald
1     Theresa
2      Angela
3    Vladimir
dtype: object
# names.str.<tab>   # Pressing the tab key after the dot lists the available string methods
full_names = pd.Series(["Donald Trump", "Theresa May", "Angela Merkel", "Vladimir Putin"])
full_names.str.split() # one column
0      [Donald, Trump]
1       [Theresa, May]
2     [Angela, Merkel]
3    [Vladimir, Putin]
dtype: object
full_names.str.split(expand=True) # two columns
| | 0 | 1 |
---|---|---|
0 | Donald | Trump |
1 | Theresa | May |
2 | Angela | Merkel |
3 | Vladimir | Putin |
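The resulting integer-labelled columns can be given descriptive names, for example by renaming them; a sketch (the names here are made up):

full_names.str.split(expand=True).rename(columns={0: "First name", 1: "Last name"})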
def makedf(cols, ind):
    # Helper: build a DataFrame whose cell in column c and row i contains the string c+i
    data = {c : [str(c) + str(i) for i in ind] for c in cols}
    return pd.DataFrame(data, ind)
a=makedf("AB", [0,1])
a
| | A | B |
---|---|---|
0 | A0 | B0 |
1 | A1 | B1 |
b=makedf("AB", [2,3])
b
| | A | B |
---|---|---|
2 | A2 | B2 |
3 | A3 | B3 |
c=makedf("CD", [0,1])
c
| | C | D |
---|---|---|
0 | C0 | D0 |
1 | C1 | D1 |
d=makedf("BC", [2,3])
d
| | B | C |
---|---|---|
2 | B2 | C2 |
3 | B3 | C3 |
pd.concat([a,b]) # The default axis is 0
| | A | B |
---|---|---|
0 | A0 | B0 |
1 | A1 | B1 |
2 | A2 | B2 |
3 | A3 | B3 |
pd.concat([a,c], axis=1)
| | A | B | C | D |
---|---|---|---|---|
0 | A0 | B0 | C0 | D0 |
1 | A1 | B1 | C1 | D1 |
r=pd.concat([a,a])
r # This is not usually what we want!
| | A | B |
---|---|---|
0 | A0 | B0 |
1 | A1 | B1 |
0 | A0 | B0 |
1 | A1 | B1 |
## 1. automatic renumbering of rows:
pd.concat([a,a], ignore_index=True)
| | A | B |
---|---|---|
0 | A0 | B0 |
1 | A1 | B1 |
2 | A0 | B0 |
3 | A1 | B1 |
## 2. hierarchical indexing
r2=pd.concat([a,a], keys=['first', 'second'])
r2
| | | A | B |
---|---|---|---|
first | 0 | A0 | B0 |
| | 1 | A1 | B1 |
second | 0 | A0 | B0 |
| | 1 | A1 | B1 |
r2["A"]["first"][0]
'A0'
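The same element can also be reached through loc by giving the full hierarchical row key as a tuple; a small sketch:

r2.loc[("first", 0), "A"]   # again 'A0'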
pd.concat([a,d])
FutureWarning: Sorting because non-concatenation axis is not aligned. A future version of pandas will change to not sort by default. To accept the future behavior, pass 'sort=False'. To retain the current behavior and silence the warning, pass 'sort=True'.
| | A | B | C |
---|---|---|---|
0 | A0 | B0 | NaN |
1 | A1 | B1 | NaN |
2 | NaN | B2 | C2 |
3 | NaN | B3 | C3 |
pd.concat([a,d], sort=False)
| | A | B | C |
---|---|---|---|
0 | A0 | B0 | NaN |
1 | A1 | B1 | NaN |
2 | NaN | B2 | C2 |
3 | NaN | B3 | C3 |
pd.concat([a,d], join="inner")
| | B |
---|---|
0 | B0 |
1 | B1 |
2 | B2 |
3 | B3 |
df = pd.DataFrame([[1000, "Jack", 21], [1500, "John", 29]], columns=["Wage", "Name", "Age"])
df
| | Wage | Name | Age |
---|---|---|---|
0 | 1000 | Jack | 21 |
1 | 1500 | John | 29 |
df2 = pd.DataFrame({"Name" : ["John", "Jack"], "Occupation": ["Plumber", "Carpenter"]})
df2
| | Name | Occupation |
---|---|---|
0 | John | Plumber |
1 | Jack | Carpenter |
pd.merge(df, df2)
| | Wage | Name | Age | Occupation |
---|---|---|---|---|
0 | 1000 | Jack | 21 | Carpenter |
1 | 1500 | John | 29 | Plumber |
df3 = pd.concat([df2, pd.DataFrame({ "Name" : ["James"], "Occupation":["Painter"]})], ignore_index=True)
df3
| | Name | Occupation |
---|---|---|
0 | John | Plumber |
1 | Jack | Carpenter |
2 | James | Painter |
pd.merge(df, df3) # By default an inner join is computed
| | Wage | Name | Age | Occupation |
---|---|---|---|---|
0 | 1000 | Jack | 21 | Carpenter |
1 | 1500 | John | 29 | Plumber |
pd.merge(df, df3, how="outer") # Outer join
| | Wage | Name | Age | Occupation |
---|---|---|---|---|
0 | 1000.0 | Jack | 21.0 | Carpenter |
1 | 1500.0 | John | 29.0 | Plumber |
2 | NaN | James | NaN | Painter |
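Other join types are selected in the same way with the how parameter; a quick sketch (its output is not reproduced here):

pd.merge(df, df3, how="left")   # keep every row of df, whether or not it has a match in df3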
wh.head()
| | Year | m | d | Time | Time zone | Precipitation amount (mm) | Snow depth (cm) | Air temperature (degC) | Rainy |
---|---|---|---|---|---|---|---|---|---|
0 | 2017 | 1 | 1 | 00:00 | UTC | -1.0 | -1.0 | 0.6 | False |
1 | 2017 | 1 | 2 | 00:00 | UTC | 4.4 | -1.0 | -3.9 | False |
2 | 2017 | 1 | 3 | 00:00 | UTC | 6.6 | 7.0 | -6.5 | True |
3 | 2017 | 1 | 4 | 00:00 | UTC | -1.0 | 13.0 | -12.8 | False |
4 | 2017 | 1 | 5 | 00:00 | UTC | -1.0 | 10.0 | -17.8 | False |
wh3 = wh.rename(columns={"m": "Month", "d": "Day", "Precipitation amount (mm)" : "Precipitation",
"Snow depth (cm)" : "Snow", "Air temperature (degC)" : "Temperature"})
wh3.head()
| | Year | Month | Day | Time | Time zone | Precipitation | Snow | Temperature | Rainy |
---|---|---|---|---|---|---|---|---|---|
0 | 2017 | 1 | 1 | 00:00 | UTC | -1.0 | -1.0 | 0.6 | False |
1 | 2017 | 1 | 2 | 00:00 | UTC | 4.4 | -1.0 | -3.9 | False |
2 | 2017 | 1 | 3 | 00:00 | UTC | 6.6 | 7.0 | -6.5 | True |
3 | 2017 | 1 | 4 | 00:00 | UTC | -1.0 | 13.0 | -12.8 | False |
4 | 2017 | 1 | 5 | 00:00 | UTC | -1.0 | 10.0 | -17.8 | False |
groups = wh3.groupby("Month")
len(groups)
12
for key, group in groups:
print(key, len(group))
1 31
2 28
3 31
4 30
5 31
6 30
7 31
8 31
9 30
10 31
11 30
12 31
groups.get_group(2).head() # Group with index two is February
| | Year | Month | Day | Time | Time zone | Precipitation | Snow | Temperature | Rainy |
---|---|---|---|---|---|---|---|---|---|
31 | 2017 | 2 | 1 | 00:00 | UTC | 1.5 | 4.0 | -0.6 | False |
32 | 2017 | 2 | 2 | 00:00 | UTC | 0.2 | 5.0 | -0.8 | False |
33 | 2017 | 2 | 3 | 00:00 | UTC | -1.0 | 6.0 | -0.2 | False |
34 | 2017 | 2 | 4 | 00:00 | UTC | 2.7 | 6.0 | 0.4 | False |
35 | 2017 | 2 | 5 | 00:00 | UTC | -1.0 | 7.0 | -2.5 | False |
groups["Temperature"].mean()
Month
1     -2.316129
2     -2.389286
3      0.983871
4      2.676667
5      9.783871
6     13.726667
7     16.035484
8     16.183871
9     11.826667
10     5.454839
11     3.950000
12     1.741935
Name: Temperature, dtype: float64
groups["Precipitation"].sum()
Month
1      26.9
2      21.0
3      29.7
4      26.9
5      -5.9
6      59.3
7      14.2
8      70.1
9      51.2
10    173.5
11    117.2
12    133.6
Name: Precipitation, dtype: float64
wh4 = wh3.copy()
wh4.loc[wh4.Precipitation == -1, "Precipitation"] = 0
wh4.loc[wh4.Snow == -1, "Snow"] = 0
wh4.head()
| | Year | Month | Day | Time | Time zone | Precipitation | Snow | Temperature | Rainy |
---|---|---|---|---|---|---|---|---|---|
0 | 2017 | 1 | 1 | 00:00 | UTC | 0.0 | 0.0 | 0.6 | False |
1 | 2017 | 1 | 2 | 00:00 | UTC | 4.4 | 0.0 | -3.9 | False |
2 | 2017 | 1 | 3 | 00:00 | UTC | 6.6 | 7.0 | -6.5 | True |
3 | 2017 | 1 | 4 | 00:00 | UTC | 0.0 | 13.0 | -12.8 | False |
4 | 2017 | 1 | 5 | 00:00 | UTC | 0.0 | 10.0 | -17.8 | False |
wh4.groupby("Month")["Precipitation"].sum()
Month
1      38.9
2      35.0
3      41.7
4      39.9
5      16.1
6      76.3
7      31.2
8      86.1
9      65.2
10    184.5
11    120.2
12    140.6
Name: Precipitation, dtype: float64
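Several aggregates can be computed in one go with the agg method; a small sketch (the resulting numbers are not reproduced here):

wh4.groupby("Month")["Temperature"].agg(["min", "max", "mean"])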
The apply method is very generic and only requires that for each group’s DataFrame the given function returns a DataFrame, Series, or a scalar.
wh4.groupby("Month").apply(lambda df : df.sort_values("Temperature"))
| | | Year | Month | Day | Time | Time zone | Precipitation | Snow | Temperature | Rainy |
---|---|---|---|---|---|---|---|---|---|---|
Month | | | | | | | | | | |
1 | 4 | 2017 | 1 | 5 | 00:00 | UTC | 0.0 | 10.0 | -17.8 | False |
5 | 2017 | 1 | 6 | 00:00 | UTC | 0.3 | 10.0 | -17.8 | False | |
3 | 2017 | 1 | 4 | 00:00 | UTC | 0.0 | 13.0 | -12.8 | False | |
2 | 2017 | 1 | 3 | 00:00 | UTC | 6.6 | 7.0 | -6.5 | True | |
15 | 2017 | 1 | 16 | 00:00 | UTC | 0.0 | 8.0 | -4.2 | False | |
1 | 2017 | 1 | 2 | 00:00 | UTC | 4.4 | 0.0 | -3.9 | False | |
24 | 2017 | 1 | 25 | 00:00 | UTC | 0.6 | 6.0 | -3.8 | False | |
6 | 2017 | 1 | 7 | 00:00 | UTC | 5.3 | 10.0 | -3.8 | True | |
16 | 2017 | 1 | 17 | 00:00 | UTC | 0.2 | 8.0 | -3.5 | False | |
11 | 2017 | 1 | 12 | 00:00 | UTC | 8.0 | 7.0 | -2.8 | True | |
14 | 2017 | 1 | 15 | 00:00 | UTC | 0.0 | 8.0 | -2.8 | False | |
23 | 2017 | 1 | 24 | 00:00 | UTC | 0.0 | 6.0 | -2.2 | False | |
20 | 2017 | 1 | 21 | 00:00 | UTC | 0.4 | 5.0 | -1.8 | False | |
10 | 2017 | 1 | 11 | 00:00 | UTC | 0.0 | 7.0 | -1.6 | False | |
19 | 2017 | 1 | 20 | 00:00 | UTC | 0.3 | 5.0 | -0.6 | False | |
7 | 2017 | 1 | 8 | 00:00 | UTC | 0.0 | 12.0 | -0.5 | False | |
22 | 2017 | 1 | 23 | 00:00 | UTC | 0.1 | 6.0 | 0.1 | False | |
30 | 2017 | 1 | 31 | 00:00 | UTC | 0.0 | 4.0 | 0.2 | False | |
8 | 2017 | 1 | 9 | 00:00 | UTC | 1.1 | 12.0 | 0.5 | False | |
28 | 2017 | 1 | 29 | 00:00 | UTC | 2.6 | 3.0 | 0.6 | False | |
0 | 2017 | 1 | 1 | 00:00 | UTC | 0.0 | 0.0 | 0.6 | False | |
13 | 2017 | 1 | 14 | 00:00 | UTC | 0.1 | 8.0 | 0.8 | False | |
27 | 2017 | 1 | 28 | 00:00 | UTC | 1.8 | 4.0 | 0.8 | False | |
29 | 2017 | 1 | 30 | 00:00 | UTC | 5.6 | 5.0 | 1.0 | True | |
21 | 2017 | 1 | 22 | 00:00 | UTC | 0.2 | 5.0 | 1.0 | False | |
12 | 2017 | 1 | 13 | 00:00 | UTC | 0.1 | 13.0 | 1.1 | False | |
17 | 2017 | 1 | 18 | 00:00 | UTC | 0.9 | 8.0 | 1.1 | False | |
18 | 2017 | 1 | 19 | 00:00 | UTC | 0.0 | 5.0 | 1.6 | False | |
26 | 2017 | 1 | 27 | 00:00 | UTC | 0.0 | 4.0 | 1.6 | False | |
9 | 2017 | 1 | 10 | 00:00 | UTC | 0.3 | 9.0 | 1.7 | False | |
... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
12 | 340 | 2017 | 12 | 7 | 00:00 | UTC | 16.3 | 0.0 | -0.8 | True |
357 | 2017 | 12 | 24 | 00:00 | UTC | 0.0 | 0.0 | -0.3 | False | |
355 | 2017 | 12 | 22 | 00:00 | UTC | 0.0 | 0.0 | -0.1 | False | |
338 | 2017 | 12 | 5 | 00:00 | UTC | 0.7 | 0.0 | 0.0 | False | |
350 | 2017 | 12 | 17 | 00:00 | UTC | 0.0 | 5.0 | 0.1 | False | |
358 | 2017 | 12 | 25 | 00:00 | UTC | 5.9 | 0.0 | 0.3 | True | |
334 | 2017 | 12 | 1 | 00:00 | UTC | 3.4 | 0.0 | 0.9 | False | |
352 | 2017 | 12 | 19 | 00:00 | UTC | 0.2 | 3.0 | 1.0 | False | |
356 | 2017 | 12 | 23 | 00:00 | UTC | 7.6 | 0.0 | 1.2 | True | |
337 | 2017 | 12 | 4 | 00:00 | UTC | 0.0 | 0.0 | 1.3 | False | |
335 | 2017 | 12 | 2 | 00:00 | UTC | 5.3 | 5.0 | 1.4 | True | |
344 | 2017 | 12 | 11 | 00:00 | UTC | 1.3 | 0.0 | 1.4 | False | |
364 | 2017 | 12 | 31 | 00:00 | UTC | 3.2 | 0.0 | 1.6 | False | |
346 | 2017 | 12 | 13 | 00:00 | UTC | 4.2 | 5.0 | 1.6 | False | |
345 | 2017 | 12 | 12 | 00:00 | UTC | 35.0 | 0.0 | 1.6 | True | |
347 | 2017 | 12 | 14 | 00:00 | UTC | 5.2 | 4.0 | 1.6 | True | |
348 | 2017 | 12 | 15 | 00:00 | UTC | 10.0 | 10.0 | 1.7 | True | |
359 | 2017 | 12 | 26 | 00:00 | UTC | 7.8 | 0.0 | 1.9 | True | |
351 | 2017 | 12 | 18 | 00:00 | UTC | 3.5 | 5.0 | 2.0 | False | |
343 | 2017 | 12 | 10 | 00:00 | UTC | 0.0 | 0.0 | 2.0 | False | |
349 | 2017 | 12 | 16 | 00:00 | UTC | 1.3 | 6.0 | 2.4 | False | |
363 | 2017 | 12 | 30 | 00:00 | UTC | 4.1 | 0.0 | 2.5 | False | |
354 | 2017 | 12 | 21 | 00:00 | UTC | 0.0 | 0.0 | 2.5 | False | |
353 | 2017 | 12 | 20 | 00:00 | UTC | 3.6 | 3.0 | 2.6 | False | |
361 | 2017 | 12 | 28 | 00:00 | UTC | 3.7 | 0.0 | 2.8 | False | |
360 | 2017 | 12 | 27 | 00:00 | UTC | 1.1 | 0.0 | 3.8 | False | |
362 | 2017 | 12 | 29 | 00:00 | UTC | 7.8 | 0.0 | 3.8 | True | |
342 | 2017 | 12 | 9 | 00:00 | UTC | 0.2 | 0.0 | 4.2 | False | |
336 | 2017 | 12 | 3 | 00:00 | UTC | 7.2 | 0.0 | 5.0 | True | |
341 | 2017 | 12 | 8 | 00:00 | UTC | 2.0 | 0.0 | 5.2 | False |
365 rows × 9 columns