io

load_adjusters(path, mirror='primary')

Get nominal adjuster locations from file.

Parameters:

    path : str (required)
        Path to the data file.
    mirror : str (default: 'primary')
        The mirror that these points belong to. Should be either 'primary' or 'secondary'.

Returns:

    adjusters : dict[tuple[int, int], NDArray[float64]]
        Nominal adjuster locations, indexed by a (row, col) tuple. Each entry is a (5, 3) array where each row is an adjuster.

Source code in lat_alignment/io.py
def load_adjusters(
    path: str, mirror: str = "primary"
) -> dict[tuple[int, int], NDArray[np.float64]]:
    """
    Get nominal adjuster locations from file.

    Parameters
    ----------
    path : str
        Path to the data file.
    mirror : str, default: 'primary'
        The mirror that these points belong to.
        Should be either: 'primary' or 'secondary'.

    Returns
    -------
    adjusters : dict[tuple[int, int], NDArray[np.float64]]
        Nominal adjuster locations.
        This is indexed by a (row, col) tuple.
        Each entry is a `(5, 3)` array where each row is an adjuster.
    """
    if mirror not in ["primary", "secondary"]:
        raise ValueError(f"Invalid mirror: {mirror}")

    def _transform(coords):
        coords = np.atleast_2d(coords)
        coords -= np.array([120, 0, 0])  # cancel out shift
        return coord_transform(coords, "va_global", f"opt_{mirror}")

    # TODO: cleaner transform call
    adjusters = defaultdict(list)
    c_points = np.genfromtxt(path, dtype=str)
    for point in c_points:
        row = int(point[0][6])
        col = int(point[0][7])
        adjusters[(row, col)] += [_transform(np.array(point[2:], dtype=np.float64))[0]]
    adjusters = {rc: np.vstack(pts) for rc, pts in adjusters.items()}

    return adjusters
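
A minimal usage sketch (the file path and panel indices here are hypothetical; the loader expects the whitespace-separated point format read above, with the panel row and column encoded in each point name):

from lat_alignment.io import load_adjusters

# Hypothetical data file; each value is a (5, 3) array of adjuster positions.
adjusters = load_adjusters("data/adjusters.txt", mirror="primary")
for (row, col), pts in adjusters.items():
    print(f"panel ({row}, {col}): {pts.shape[0]} adjusters")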

load_corners(path)

Get panel corners from file.

Parameters:

    path : str (required)
        Path to the data file.

Returns:

    corners : dict[tuple[int, int], NDArray[float64]]
        The corners, indexed by a (row, col) tuple. Each entry is a (4, 3) array where each row is a corner.

Source code in lat_alignment/io.py
def load_corners(path: str) -> dict[tuple[int, int], NDArray[np.float64]]:
    """
    Get panel corners from file.

    Parameters
    ----------
    path : str
        Path to the data file.

    Returns
    -------
    corners : dict[tuple[int, int], NDArray[np.float64]]
        The corners. This is indexed by a (row, col) tuple.
        Each entry is a `(4, 3)` array where each row is a corner.
    """
    with open(path) as file:
        corners_raw = yaml.safe_load(file)

    corners = {
        (int(panel[7]), int(panel[9])): np.vstack(
            [np.array(coord.split(), np.float64) for coord in coords]
        )
        for panel, coords in corners_raw.items()
    }
    return corners
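
A usage sketch (the path and panel key are hypothetical; the file is YAML whose keys name panels, with the row and column digits at string indices 7 and 9, and whose values are four "x y z" coordinate strings):

from lat_alignment.io import load_corners

# Hypothetical corners file in the YAML layout described above.
corners = load_corners("data/corners.yaml")
print(corners[(1, 3)].shape)  # (4, 3): one corner per row, hypothetical panel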

load_data(path, source='photo', **kwargs)

Load a dataset from path.

Parameters:

    path : str (required)
        The path to the data to load.
    source : str (default: 'photo')
        The data source. Currently valid options are:

        * photo
        * tracker
    **kwargs
        Arguments to pass to the relevant loader function. See load_photo and load_tracker for details.

Returns:

    data : Dataset
        The loaded dataset.
Source code in lat_alignment/io.py
def load_data(path: str, source: str = "photo", **kwargs) -> Dataset:
    """
    Load a dataset from path.

    Parameters
    ----------
    path : str
        The path to the data to load.
    source : str, default: 'photo'
        The data source. Currently valid options are:

        * photo
        * tracker
    **kwargs
        Arguments to pass to the relevant loader function.
        See `load_photo` and `load_tracker` for details.

    Returns
    -------
    data : Dataset
        The loaded dataset.
    """
    if source == "photo":
        return load_photo(path, **kwargs)
    elif source == "tracker":
        return load_tracker(path, **kwargs)
    raise ValueError(f"Invalid data source: {source}")
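
A dispatch sketch (the paths are hypothetical); keyword arguments are forwarded to the selected loader:

from lat_alignment.io import load_data

# Photogrammetry file; err_thresh and plot are forwarded to load_photo.
photo = load_data("data/measurements.csv", source="photo", err_thresh=3, plot=False)

# Laser tracker file; group_dist is forwarded to load_tracker.
tracker = load_data("data/tracker.txt", source="tracker", group_dist=0.05)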

load_photo(path, err_thresh=2, doubles_dist=10, plot=True)

Load photogrammetry data. Assumes the first column is target names and the next three are (x, y, z).

Parameters:

    path : str (required)
        The path to the photogrammetry data.
    err_thresh : float (default: 2)
        How many times the median photogrammetry error a target needs to have to be cut.
    doubles_dist : float (default: 10)
        Distance in xy below which two targets are treated as doubles; the higher-error target of each pair is removed.
    plot : bool (default: True)
        If True, display a scatter plot of targets.

Returns:

    data : Dataset
        The photogrammetry data.

Source code in lat_alignment/io.py
def load_photo(
    path: str, err_thresh: float = 2, doubles_dist: float = 10, plot: bool = True
) -> Dataset:
    """
    Load photogrammetry data.
    Assumes the first column is target names and the next three are (x, y, z).

    Parameters
    ----------
    path : str
        The path to the photogrammetry data.
    err_thresh : float, default: 2
        How many times the median photogrammetry error
        a target needs to have to be cut.
    doubles_dist : float, default: 10
        Distance in xy below which two targets are treated as doubles;
        the higher-error target of each pair is removed.
    plot : bool, default: True
        If True display a scatter plot of targets.

    Returns
    -------
    data : Dataset
        The photogrammetry data.
    """
    logger.info("Loading measurement data")
    labels = np.genfromtxt(path, dtype=str, delimiter=",", usecols=(0,))
    coords = np.genfromtxt(path, dtype=np.float64, delimiter=",", usecols=(1, 2, 3))
    errs = np.genfromtxt(path, dtype=np.float64, delimiter=",", usecols=(4, 5, 6))
    msk = (np.char.find(labels, "TARGET") >= 0) + (np.char.find(labels, "CODE") >= 0)

    labels, coords, errs = labels[msk], coords[msk], errs[msk]
    err = np.linalg.norm(errs, axis=-1)
    trg_msk = np.char.find(labels, "TARGET") >= 0
    code_msk = np.char.find(labels, "CODE") >= 0

    err_msk = (err < err_thresh * np.median(err[trg_msk])) + code_msk
    labels, coords, err, errs = (
        labels[err_msk],
        coords[err_msk],
        err[err_msk],
        errs[err_msk],
    )
    logger.info("\t%d good points loaded", len(coords))
    logger.info("\t%d high error points not loaded", np.sum(~err_msk))

    # Let's find and remove doubles
    # Dumb brute force
    trg_msk = np.char.find(labels, "TARGET") >= 0
    edm = make_edm(coords[trg_msk, :2])
    np.fill_diagonal(edm, np.nan)
    to_kill = []
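    # For each target whose nearest xy neighbor is closer than doubles_dist,
    # keep the lower-error target of the pair and mark the other for removal.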
    for i in range(len(edm)):
        if labels[trg_msk][i] in to_kill:
            continue
        imin = np.nanargmin(edm[i])
        if edm[i][imin] > doubles_dist:
            continue
        if err[trg_msk][i] < err[trg_msk][imin]:
            to_kill += [labels[trg_msk][imin]]
        else:
            to_kill += [labels[trg_msk][i]]
    msk = ~np.isin(labels, to_kill)
    logger.info("\tFound and removed %d doubles", len(to_kill))
    labels, coords, err, errs = labels[msk], coords[msk], err[msk], errs[msk]

    if plot:
        fig = plt.figure()
        ax = fig.add_subplot(projection="3d")
        p = ax.scatter(
            coords[:, 0],
            coords[:, 1],
            coords[:, 2],
            marker="x",
            c=err,
            vmax=np.percentile(err, 90),
        )
        fig.colorbar(p)
        plt.show()

    data = {
        label: np.array([coord, err]) for label, coord, err in zip(labels, coords, errs)
    }
    return DatasetPhotogrammetry(data)
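
A usage sketch (the path is hypothetical; the file is comma-separated with target names in column 0, (x, y, z) in columns 1-3, and per-axis errors in columns 4-6):

from lat_alignment.io import load_photo

# Hypothetical path; cut targets above 3x the median error, skip the plot.
data = load_photo("data/photo.csv", err_thresh=3, doubles_dist=10, plot=False)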

load_tracker(path, group_dist=0.02, group_thresh=0.02, err=0.005, calc_sys_err=False, cam_transform_path=None, dist_err=8e-07, ang_err=5e-06)

Load laser tracker data.

Parameters:

    path : str (required)
        The path to the laser tracker data. The type of data will be inferred from the extension.
    group_dist : float (default: 0.02)
        Distance between points in xy needed to group them for cuts. Only used for .txt files. Set to 0 to disable.
    group_thresh : float (default: 0.02)
        Difference in z between a point and its group's median z at which the point is cut. Only used for .txt files. Set to 0 to disable.
    err : float (default: 0.005)
        The base error to assume for the tracker data. Only used for .txt files.
    calc_sys_err : bool (default: False)
        If True, calculate the systematic error based on the provided tracker specs. Only used for .txt files.
    cam_transform_path : Optional[str] (default: None)
        Alignment matrix exported from CAM2. Used when calculating systematic error. If not provided we assume the data is in the FARO's internal coordinates. Only used for .txt files.
    dist_err : float (default: 8e-7)
        The systematic error as a function of distance in mm/mm. Only used for .txt files.
    ang_err : float (default: 5e-6)
        The systematic error as a function of angle in mm/mm. Only used for .txt files.

Returns:

    data : Dataset
        The tracker data. For .txt or .csv files this will be the base Dataset class. For .yaml files this will be a DatasetReference.

Source code in lat_alignment/io.py
def load_tracker(
    path: str,
    group_dist: float = 0.02,
    group_thresh: float = 0.02,
    err: float = 0.005,
    calc_sys_err: bool = False,
    cam_transform_path: Optional[str] = None,
    dist_err: float = 8e-7,
    ang_err: float = 5e-6,
) -> Dataset:
    """
    Load laser tracker data.

    Parameters
    ----------
    path : str
        The path to the laser tracker data.
        The type of data will be inferred from the extension.
    group_dist : float, default: 0.02
        Distance between points in xy needed to group them for cuts.
        Only used for `.txt` files.
        Set to 0 to disable.
    group_thresh : float, default: 0.02
        Difference in z between a point and its group's median z
        at which the point is cut.
        Only used for `.txt` files.
        Set to 0 to disable.
    err : float, default: .005
        The base error to assume for the tracker data.
        Only used for `.txt` files.
    calc_sys_err : bool, default: False
        If `True`, calculate the systematic error based on provided tracker specs.
        Only used for `.txt` files.
    cam_transform_path : Optional[str], default: None
        Alignment matrix exported from CAM2.
        Used when calculating systematic error.
        If not provided we assume the data is in the FARO's internal coordinates.
        Only used for `.txt` files.
    dist_err : float, default: 8e-7
        The systematic error as a function of distance in mm/mm.
        Only used for `.txt` files.
    ang_err : float, default: 5e-6
        The systematic error as a function of angle in mm/mm.
        Only used for `.txt` files.

    Returns
    -------
    data : Dataset
        The tracker data.
        For `.txt` or `.csv` files this will be the base `Dataset` class.
        For `.yaml` files this will be a `DatasetReference`.
    """
    ext = os.path.splitext(path)[1]
    if ext == ".yaml":
        return _load_tracker_yaml(path)
    elif ext == ".txt":
        return _load_tracker_txt(
            path,
            group_dist,
            group_thresh,
            err,
            calc_sys_err,
            cam_transform_path,
            dist_err,
            ang_err,
        )
    elif ext == ".csv":
        return _load_tracker_csv(path)
    raise ValueError(f"Invalid tracker data with extension {ext}")
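
A usage sketch (the paths are hypothetical); the loader is chosen by file extension:

from lat_alignment.io import load_tracker

# .txt data: grouped cuts plus systematic error from a CAM2 alignment matrix.
data = load_tracker(
    "data/tracker.txt",
    calc_sys_err=True,
    cam_transform_path="data/cam2_alignment.txt",  # hypothetical CAM2 export
)

# .yaml data: loaded as a DatasetReference.
ref = load_tracker("data/reference.yaml")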