https://github.com/torvalds/linux
Revision a3b25d157d5a52ef3f9296a739ee28f5d36e8968 authored by Linus Torvalds on 24 May 2019, 16:12:46 UTC, committed by Linus Torvalds on 24 May 2019, 16:12:46 UTC
Pull drm fixes from Dave Airlie:
 "Nothing too unusual here for rc2. Except the amdgpu DMCU firmware
  loading fix caused build breakage with a different set of Kconfig
  options. I've just reverted it for now until the AMD folks can rewrite
  it to avoid that problem.

  i915:
   - boosting fix
   - bump ready task fixes
   - GVT - reset fix, error return, TRTT handling fix

  amdgpu:
   - DMCU firmware loading fix
   - Polaris 10 pci id for kfd
   - picasso screen corruption fix
   - SR-IOV fixes
   - vega driver reload fixes
   - SMU locking fix
   - compute profile fix for kfd

  vmwgfx:
   - integer overflow fixes
   - dma sg fix

  sun4i:
   - HDMI phy fixes

  gma500:
   - LVDS detection fix

  panfrost:
   - devfreq selection fix"

* tag 'drm-fixes-2019-05-24-1' of git://anongit.freedesktop.org/drm/drm: (32 commits)
  Revert "drm/amd/display: Don't load DMCU for Raven 1"
  drm/panfrost: Select devfreq
  drm/gma500/cdv: Check vbt config bits when detecting lvds panels
  drm/vmwgfx: integer underflow in vmw_cmd_dx_set_shader() leading to an invalid read
  drm/vmwgfx: NULL pointer dereference from vmw_cmd_dx_view_define()
  drm/vmwgfx: Use the dma scatter-gather iterator to get dma addresses
  drm/vmwgfx: Fix compat mode shader operation
  drm/vmwgfx: Fix user space handle equal to zero
  drm/vmwgfx: Don't send drm sysfs hotplug events on initial master set
  drm/i915/gvt: Fix an error code in ppgtt_populate_spt_by_guest_entry()
  drm/i915/gvt: do not let TRTTE and 0x4dfc write passthrough to hardware
  drm/i915/gvt: add 0x4dfc to gen9 save-restore list
  drm/i915/gvt: Tiled Resources mmios are in-context mmios for gen9+
  drm/i915/gvt: use cmd to restore in-context mmios to hw for gen9 platform
  drm/i915/gvt: emit init breadcrumb for gvt request
  drm/amdkfd: Fix compute profile switching
  drm/amdgpu: skip fw pri bo alloc for SRIOV
  drm/amd/powerplay: fix locking in smu_feature_set_supported()
  drm/amdgpu/gmc9: set vram_width properly for SR-IOV
  drm/amdgpu/soc15: skip reset on init
  ...
2 parents 4dde821 + c074989
buffer.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hpfs/buffer.c
 *
 *  Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999
 *
 *  general buffer i/o
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include "hpfs_fn.h"

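/*
 * HPFS keeps a hotfix map: sectors that developed media errors are
 * transparently remapped to spare sectors. Return the replacement for
 * "sec" if it is hotfixed, otherwise return "sec" unchanged.
 */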
secno hpfs_search_hotfix_map(struct super_block *s, secno sec)
{
	unsigned i;
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	for (i = 0; unlikely(i < sbi->n_hotfixes); i++) {
		if (sbi->hotfix_from[i] == sec) {
			return sbi->hotfix_to[i];
		}
	}
	return sec;
}

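/*
 * Return how many of the "n" sectors starting at "sec" can be used
 * before the run hits a hotfixed sector. Callers use this to avoid
 * issuing a single multi-sector I/O across a remapped sector.
 */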
unsigned hpfs_search_hotfix_map_for_range(struct super_block *s, secno sec, unsigned n)
{
	unsigned i;
	struct hpfs_sb_info *sbi = hpfs_sb(s);
	for (i = 0; unlikely(i < sbi->n_hotfixes); i++) {
		if (sbi->hotfix_from[i] >= sec && sbi->hotfix_from[i] < sec + n) {
			n = sbi->hotfix_from[i] - sec;
		}
	}
	return n;
}

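/*
 * Issue read-ahead for "n" sectors starting at "secno". Nothing is
 * queued if the range would cross a hotfixed sector or if the first
 * sector is already cached and up to date; otherwise the read-ahead
 * requests are submitted under a block plug so they can be merged.
 */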
void hpfs_prefetch_sectors(struct super_block *s, unsigned secno, int n)
{
	struct buffer_head *bh;
	struct blk_plug plug;

	if (n <= 0 || unlikely(secno >= hpfs_sb(s)->sb_fs_size))
		return;

	if (unlikely(hpfs_search_hotfix_map_for_range(s, secno, n) != n))
		return;

	bh = sb_find_get_block(s, secno);
	if (bh) {
		if (buffer_uptodate(bh)) {
			brelse(bh);
			return;
		}
		brelse(bh);
	}

	blk_start_plug(&plug);
	while (n > 0) {
		if (unlikely(secno >= hpfs_sb(s)->sb_fs_size))
			break;
		sb_breadahead(s, secno);
		secno++;
		n--;
	}
	blk_finish_plug(&plug);
}

/* Map a sector into a buffer and return pointers to it and to the buffer. */

void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp,
		 int ahead)
{
	struct buffer_head *bh;

	hpfs_lock_assert(s);

	hpfs_prefetch_sectors(s, secno, ahead);

	cond_resched();

	*bhp = bh = sb_bread(s, hpfs_search_hotfix_map(s, secno));
	if (bh != NULL)
		return bh->b_data;
	else {
		pr_err("%s(): read error\n", __func__);
		return NULL;
	}
}

/* Like hpfs_map_sector but don't read anything */

void *hpfs_get_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp)
{
	struct buffer_head *bh;
	/*return hpfs_map_sector(s, secno, bhp, 0);*/

	hpfs_lock_assert(s);

	cond_resched();

	if ((*bhp = bh = sb_getblk(s, hpfs_search_hotfix_map(s, secno))) != NULL) {
		if (!buffer_uptodate(bh)) wait_on_buffer(bh);
		set_buffer_uptodate(bh);
		return bh->b_data;
	} else {
		pr_err("%s(): getblk failed\n", __func__);
		return NULL;
	}
}

/* Map 4 sectors into a quad buffer and return pointers to the data and to the buffers. */

void *hpfs_map_4sectors(struct super_block *s, unsigned secno, struct quad_buffer_head *qbh,
		   int ahead)
{
	char *data;

	hpfs_lock_assert(s);

	cond_resched();

	if (secno & 3) {
		pr_err("%s(): unaligned read\n", __func__);
		return NULL;
	}

	hpfs_prefetch_sectors(s, secno, 4 + ahead);

	if (!hpfs_map_sector(s, secno + 0, &qbh->bh[0], 0)) goto bail0;
	if (!hpfs_map_sector(s, secno + 1, &qbh->bh[1], 0)) goto bail1;
	if (!hpfs_map_sector(s, secno + 2, &qbh->bh[2], 0)) goto bail2;
	if (!hpfs_map_sector(s, secno + 3, &qbh->bh[3], 0)) goto bail3;

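	/*
	 * If the four 512-byte buffers happen to be contiguous in memory,
	 * return a pointer straight into the first one; otherwise fall back
	 * to a 2048-byte bounce buffer and copy the sector data into it.
	 */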
	if (likely(qbh->bh[1]->b_data == qbh->bh[0]->b_data + 1 * 512) &&
	    likely(qbh->bh[2]->b_data == qbh->bh[0]->b_data + 2 * 512) &&
	    likely(qbh->bh[3]->b_data == qbh->bh[0]->b_data + 3 * 512)) {
		return qbh->data = qbh->bh[0]->b_data;
	}

	qbh->data = data = kmalloc(2048, GFP_NOFS);
	if (!data) {
		pr_err("%s(): out of memory\n", __func__);
		goto bail4;
	}

	memcpy(data + 0 * 512, qbh->bh[0]->b_data, 512);
	memcpy(data + 1 * 512, qbh->bh[1]->b_data, 512);
	memcpy(data + 2 * 512, qbh->bh[2]->b_data, 512);
	memcpy(data + 3 * 512, qbh->bh[3]->b_data, 512);

	return data;

 bail4:
	brelse(qbh->bh[3]);
 bail3:
	brelse(qbh->bh[2]);
 bail2:
	brelse(qbh->bh[1]);
 bail1:
	brelse(qbh->bh[0]);
 bail0:
	return NULL;
}

/* Like hpfs_map_4sectors, but don't read the sectors from disk */

void *hpfs_get_4sectors(struct super_block *s, unsigned secno,
                          struct quad_buffer_head *qbh)
{
	cond_resched();

	hpfs_lock_assert(s);

	if (secno & 3) {
		pr_err("%s(): unaligned read\n", __func__);
		return NULL;
	}

	if (!hpfs_get_sector(s, secno + 0, &qbh->bh[0])) goto bail0;
	if (!hpfs_get_sector(s, secno + 1, &qbh->bh[1])) goto bail1;
	if (!hpfs_get_sector(s, secno + 2, &qbh->bh[2])) goto bail2;
	if (!hpfs_get_sector(s, secno + 3, &qbh->bh[3])) goto bail3;

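	/*
	 * Same contiguity check as in hpfs_map_4sectors, but since nothing
	 * is read here, the non-contiguous case only needs the bounce
	 * buffer allocated - the caller fills it in.
	 */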
	if (likely(qbh->bh[1]->b_data == qbh->bh[0]->b_data + 1 * 512) &&
	    likely(qbh->bh[2]->b_data == qbh->bh[0]->b_data + 2 * 512) &&
	    likely(qbh->bh[3]->b_data == qbh->bh[0]->b_data + 3 * 512)) {
		return qbh->data = qbh->bh[0]->b_data;
	}

	if (!(qbh->data = kmalloc(2048, GFP_NOFS))) {
		pr_err("%s(): out of memory\n", __func__);
		goto bail4;
	}
	return qbh->data;

bail4:
	brelse(qbh->bh[3]);
bail3:
	brelse(qbh->bh[2]);
bail2:
	brelse(qbh->bh[1]);
bail1:
	brelse(qbh->bh[0]);
bail0:
	return NULL;
}
	

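/*
 * Release a quad buffer: free the bounce buffer if one was allocated
 * (qbh->data then does not point into bh[0]) and drop all four buffer
 * heads.
 */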
void hpfs_brelse4(struct quad_buffer_head *qbh)
{
	if (unlikely(qbh->data != qbh->bh[0]->b_data))
		kfree(qbh->data);
	brelse(qbh->bh[0]);
	brelse(qbh->bh[1]);
	brelse(qbh->bh[2]);
	brelse(qbh->bh[3]);
}	

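/*
 * If a bounce buffer is in use, copy its contents back into the four
 * buffer heads before marking them dirty.
 */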
void hpfs_mark_4buffers_dirty(struct quad_buffer_head *qbh)
{
	if (unlikely(qbh->data != qbh->bh[0]->b_data)) {
		memcpy(qbh->bh[0]->b_data, qbh->data + 0 * 512, 512);
		memcpy(qbh->bh[1]->b_data, qbh->data + 1 * 512, 512);
		memcpy(qbh->bh[2]->b_data, qbh->data + 2 * 512, 512);
		memcpy(qbh->bh[3]->b_data, qbh->data + 3 * 512, 512);
	}
	mark_buffer_dirty(qbh->bh[0]);
	mark_buffer_dirty(qbh->bh[1]);
	mark_buffer_dirty(qbh->bh[2]);
	mark_buffer_dirty(qbh->bh[3]);
}